// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * RajeshwarR: Dec 11, 2007
 *   -- Added support for Inter Processor Interrupts
 *
 * Vineetg: Nov 1st, 2007
 *   -- Initial Write (Borrowed heavily from ARM)
 */

#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/of_fdt.h>

#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>

#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;

EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
EXPORT_SYMBOL_GPL(smp_bitops_lock);
#endif

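/*
 * Weak placeholder: SMP platform code (e.g. the ARC MCIP driver) provides
 * a strong definition carrying its own hooks to override this.
 */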
struct plat_smp_ops __weak plat_smp_ops;

/* XXX: per cpu? Only needed once in early secondary boot */
struct task_struct *secondary_idle_tsk;

/* Called from start_kernel */
void __init smp_prepare_boot_cpu(void)
{
}

static int __init arc_get_cpu_map(const char *name, struct cpumask *cpumask)
{
	unsigned long dt_root = of_get_flat_dt_root();
	const char *buf;

	buf = of_get_flat_dt_prop(dt_root, name, NULL);
	if (!buf)
		return -EINVAL;

	if (cpulist_parse(buf, cpumask))
		return -EINVAL;

	return 0;
}

/*
 * Read from DeviceTree and setup cpu possible mask. If there is no
 * "possible-cpus" property in DeviceTree pretend all [0..NR_CPUS-1] exist.
 */
static void __init arc_init_cpu_possible(void)
{
	struct cpumask cpumask;

	if (arc_get_cpu_map("possible-cpus", &cpumask)) {
		pr_warn("Failed to get possible-cpus from dtb, pretending all %u cpus exist\n",
			NR_CPUS);

		cpumask_setall(&cpumask);
	}

	if (!cpumask_test_cpu(0, &cpumask))
		panic("Master cpu (cpu[0]) is missing from cpu possible mask!");

	init_cpu_possible(&cpumask);
}

/*
 * Called from setup_arch() before calling setup_processor()
 *
 * - Initialise the CPU possible map early - this describes the CPUs
 *   which may be present or become present in the system.
 * - Call early smp init hook. This can initialize a specific multi-core
 *   IP which is say common to several platforms (hence not part of
 *   platform specific init_early() hook)
 */
void __init smp_init_cpus(void)
{
	arc_init_cpu_possible();

	if (plat_smp_ops.init_early_smp)
		plat_smp_ops.init_early_smp();
}

/* Called from init() => process 1 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * If the platform didn't set up the present map already, do it now.
	 * The boot cpu is set present already by init/main.c
	 */
	if (num_present_cpus() <= 1)
		init_cpu_present(cpu_possible_mask);
}

void __init smp_cpus_done(unsigned int max_cpus)
{

}

/*
 * Default smp boot helper for Run-on-reset case where all cores start off
 * together. Non-masters need to wait for Master to start running.
 * This is implemented using a flag in memory, which Non-masters spin-wait on.
 * Master sets it to cpu-id of core to "ungate" it.
 */
static volatile int wake_flag;

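/*
 * On ARCv2 a non-master may spin on wake_flag before caches/coherency are
 * fully set up, so the flag is accessed via the uncached helpers there.
 * On ARCompact the plain volatile access suffices.
 */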
#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f)		f
#define __boot_write(f, v)	f = v

#else

#define __boot_read(f)		arc_read_uncached_32(&f)
#define __boot_write(f, v)	arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
	BUG_ON(cpu == 0);

	__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
	/* for halt-on-reset, we've waited already */
	if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
		return;

	while (__boot_read(wake_flag) != cpu)
		;

	__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
	return plat_smp_ops.info ? : "";
}

/*
 * The very first "C" code executed by secondary
 * Called from asm stub in head.S
 * "current"/R25 already setup by low level boot code
 */
void start_kernel_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/* MMU, Caches, Vector Table, Interrupts etc */
	setup_processor();

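	/* Pin init_mm (bump mm_users and mm_count): this cpu's idle task borrows it as active_mm */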
	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	/* Some SMP H/w setup - for each cpu */
	if (plat_smp_ops.init_per_cpu)
		plat_smp_ops.init_per_cpu(cpu);

	if (machine_desc->init_per_cpu)
		machine_desc->init_per_cpu(cpu);

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Called from kernel_init() -> smp_init() - for each CPU
 *
 * At this point, Secondary Processor is "HALT"ed:
 * - It booted, but was halted in head.S
 * - It was configured to halt-on-reset
 * So need to wake it up.
 *
 * Essential requirements being where to run from (PC) and stack (SP)
 */
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	unsigned long wait_till;

	secondary_idle_tsk = idle;

	pr_info("Idle Task [%d] %p\n", cpu, idle);
	pr_info("Trying to bring up CPU%u ...\n", cpu);

	if (plat_smp_ops.cpu_kick)
		plat_smp_ops.cpu_kick(cpu,
				(unsigned long)first_lines_of_secondary);
	else
		arc_default_smp_cpu_kick(cpu, (unsigned long)NULL);

	/* wait for 1 sec after kicking the secondary */
	wait_till = jiffies + HZ;
	while (time_before(jiffies, wait_till)) {
		if (cpu_online(cpu))
			break;
	}

	if (!cpu_online(cpu)) {
		pr_info("Timeout: CPU%u FAILED to come up !!!\n", cpu);
		return -1;
	}

	secondary_idle_tsk = NULL;

	return 0;
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

/*****************************************************************************/
/*              Inter Processor Interrupt Handling                           */
/*****************************************************************************/

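/*
 * IPI_EMPTY (bit 0) is a placeholder and never actually sent: a zero
 * ipi_data word simply means "no msg pending".
 */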
enum ipi_msg_type {
	IPI_EMPTY = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/*
 * In arches with IRQ for each msg type (above), receiver can use IRQ-id to
 * figure out what msg was sent. For those which don't (ARC has dedicated IPI
 * IRQ), the msg-type needs to be conveyed via per-cpu data
 */

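/* Per-cpu bitmask of pending msgs: bit n set <=> ipi_msg_type n pending */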
static DEFINE_PER_CPU(unsigned long, ipi_data);

static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
{
	unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu);
	unsigned long old, new;
	unsigned long flags;

	pr_debug("%d Sending msg [%d] to %d\n", smp_processor_id(), msg, cpu);

	local_irq_save(flags);

	/*
	 * Atomically write new msg bit (in case others are writing too),
	 * and read back old value
	 */
	do {
		new = old = READ_ONCE(*ipi_data_ptr);
		new |= 1U << msg;
	} while (cmpxchg(ipi_data_ptr, old, new) != old);

	/*
	 * Call the platform specific IPI kick function, but avoid if possible:
	 * Only do so if there's no pending msg from other concurrent sender(s).
	 * Otherwise, receiver will see this msg as well when it takes the
	 * IPI corresponding to that msg. This is true, even if it is already in
	 * IPI handler, because !@old means it has not yet dequeued the msg(s)
	 * so @new msg can be a free-loader
	 */
	if (plat_smp_ops.ipi_send && !old)
		plat_smp_ops.ipi_send(cpu);

	local_irq_restore(flags);
}

static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
{
	unsigned int cpu;

	for_each_cpu(cpu, callmap)
		ipi_send_msg_one(cpu, msg);
}

void smp_send_reschedule(int cpu)
{
	ipi_send_msg_one(cpu, IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	struct cpumask targets;
	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	ipi_send_msg(&targets, IPI_CPU_STOP);
}

void arch_send_call_function_single_ipi(int cpu)
{
	ipi_send_msg_one(cpu, IPI_CALL_FUNC);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	ipi_send_msg(mask, IPI_CALL_FUNC);
}

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(void)
{
	machine_halt();
}

static inline int __do_IPI(unsigned long msg)
{
	int rc = 0;

	switch (msg) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CPU_STOP:
		ipi_cpu_stop();
		break;

	default:
		rc = 1;
	}

	return rc;
}

/*
 * arch-common ISR to handle inter-processor interrupts
 * Has hooks for platform specific IPI
 */
irqreturn_t do_IPI(int irq, void *dev_id)
{
	unsigned long pending;
	unsigned long __maybe_unused copy;

	pr_debug("IPI [%ld] received on cpu %d\n",
		 *this_cpu_ptr(&ipi_data), smp_processor_id());

	if (plat_smp_ops.ipi_clear)
		plat_smp_ops.ipi_clear(irq);

	/*
	 * "dequeue" the msg corresponding to this IPI (and possibly other
	 * piggybacked msg from elided IPIs: see ipi_send_msg_one() above)
	 */
	copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);

	do {
		unsigned long msg = __ffs(pending);
		int rc;

		rc = __do_IPI(msg);
		if (rc)
			pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
		pending &= ~(1U << msg);
	} while (pending);

	return IRQ_HANDLED;
}

/*
 * API called by platform code to hookup arch-common ISR to their IPI IRQ
 *
 * Note: If IPI is provided by platform (vs. say ARC MCIP), their intc setup/map
 * function needs to call irq_set_percpu_devid() for IPI IRQ, otherwise
 * request_percpu_irq() below will fail
 */
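/* Dummy per-cpu cookie: request_percpu_irq() requires a valid percpu dev_id */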
static DEFINE_PER_CPU(int, ipi_dev);

int smp_ipi_irq_setup(int cpu, irq_hw_number_t hwirq)
{
	int *dev = per_cpu_ptr(&ipi_dev, cpu);
	unsigned int virq = irq_find_mapping(NULL, hwirq);

	if (!virq)
		panic("Cannot find virq for root domain and hwirq=%lu", hwirq);

	/* Boot cpu calls request, all call enable */
	if (!cpu) {
		int rc;

		rc = request_percpu_irq(virq, do_IPI, "IPI Interrupt", dev);
		if (rc)
			panic("Percpu IRQ request failed for %u\n", virq);
	}

	enable_percpu_irq(virq, 0);

	return 0;
}