/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * As of 2.5, kernels no longer have an init_tasks structure, so we need
 * some other way of telling a new secondary core where to place its SVC
 * stack.
 */
struct secondary_data secondary_data;

enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
#ifdef CONFIG_THREAD_INFO_IN_TASK
	secondary_data.task = idle;
#endif
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

#ifdef CONFIG_THREAD_INFO_IN_TASK
	secondary_data.task = NULL;
#endif
	secondary_data.stack = NULL;

	return ret;
}

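/*
 * Record per-CPU information for the given CPU; at present this is just
 * its topology.
 */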
static void smp_store_cpu_info(unsigned int cpuid)
{
	store_cpu_topology(cpuid);
}

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue. Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
		cpu, read_cpuid_id());
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shut down.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * Called on the thread which is asking for a CPU to be shut down -
 * waits until the shutdown has completed, or until it times out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}

/*
 * Called from the idle thread for the CPU which has been shut down.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_irq_disable();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
}

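/*
 * Early boot CPU setup: install the boot CPU's per-CPU offset and record
 * its cpuinfo before any secondaries are brought up.
 */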
void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
}

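/*
 * Read the MPIDR of a device tree cpu node from its "reg" property.
 * Returns INVALID_HWID if the property is missing or sets bits outside
 * the MPIDR affinity fields.
 */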
static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%s: missing reg property\n", dn->full_name);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%s: invalid reg property\n", dn->full_name);
		return INVALID_HWID;
	}
	return hwid;
}

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	/*
	 * Set up the ACPI parking protocol cpu entries while initializing
	 * the cpu_logical_map, so that the MADT entries need not be parsed
	 * again later (i.e. a valid cpu_logical_map entry should contain a
	 * valid parking protocol data set to initialize the cpu if the
	 * parking protocol is the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}
#else
#define acpi_table_parse_madt(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn = NULL;

	while ((dn = of_find_node_by_type(dn, "cpu"))) {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%s: duplicate cpu reg properties in the DT\n",
				dn->full_name);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%s: duplicate boot cpu reg property in DT\n",
					dn->full_name);
				goto next;
			}

			bootcpu_valid = true;

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		/*
		 * do a walk of MADT to determine how many CPUs
		 * we have including disabled CPUs, and get information
		 * we need for SMP init
		 */
		acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      acpi_parse_gic_cpu_interface, 0);

	if (cpu_count > NR_CPUS)
		pr_warn("no. of cores (%d) greater than configured maximum of %d - clipping\n",
			cpu_count, NR_CPUS);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

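/*
 * Prepare secondary CPUs for booting: initialise the topology, clamp
 * max_cpus to the number of possible CPUs, and mark as present every CPU
 * whose cpu_prepare() method succeeds.
 */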
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu, ncores = num_possible_cpus();

	init_cpu_topology();

	smp_store_cpu_info(smp_processor_id());

	/*
	 * are we trying to boot more cores than exist?
	 */
	if (max_cpus > ncores)
		max_cpus = ncores;

	/* Don't bother if we're effectively UP */
	if (max_cpus <= 1)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 *
	 * Make sure we online at most (max_cpus - 1) additional CPUs.
	 */
	max_cpus--;
	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		max_cpus--;
	}
}

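/*
 * Hook used to raise IPIs; installed by the interrupt controller driver
 * (typically the GIC) via set_smp_cross_call().
 */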
void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

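/*
 * Print the per-CPU counts for each IPI type to the given seq_file
 * (used by the arch interrupt accounting, e.g. for /proc/interrupts).
 */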
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

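/*
 * Return the total number of IPIs handled by @cpu, summed over all IPI
 * types; used for interrupt statistics.
 */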
u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

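/*
 * Cross-call hooks used by the generic smp_call_function*() machinery:
 * raise IPI_CALL_FUNC on the target CPU(s).
 */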
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

static DEFINE_RAW_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
		raw_spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

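/*
 * Called by the scheduler core to kick @cpu: raises IPI_RESCHEDULE, which
 * ends up in scheduler_ipi() via handle_IPI().
 */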
void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

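/*
 * Stop all other online CPUs (e.g. on shutdown or panic): send IPI_CPU_STOP
 * to every other online CPU and wait up to one second for them to go
 * offline.
 */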
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_other_online_cpus() && timeout--)
		udelay(1);

	if (num_other_online_cpus())
		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}