// SPDX-License-Identifier: GPL-2.0-or-later
/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001,2004 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
*/
#include <linux/types.h>
#include <linux/spinlock.h>

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>

#include <linux/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/tlbflush.h>

#include <asm/io.h>
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

#undef DEBUG_SMP
#ifdef DEBUG_SMP
static int smp_debug_lvl = 0;
/* Wrapped in do/while(0) so the macro expands to a single statement
 * and is safe inside unbraced if/else bodies. */
#define smp_debug(lvl, printargs...)		\
	do {					\
		if (lvl >= smp_debug_lvl)	\
			printk(printargs);	\
	} while (0)
#else
#define smp_debug(lvl, ...)	do { } while(0)
#endif /* DEBUG_SMP */
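
/*
 * Example (compiled only when DEBUG_SMP is defined):
 *
 *	smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
 *
 * prints whenever the message level is at or above smp_debug_lvl.
 */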

/* Idle task pointer handed to the slave boot path; the task also
 * carries the process stack (see smp_boot_one_cpu()). */
volatile struct task_struct *smp_init_current_idle_task;

/* track which CPU is booting */
static volatile int cpu_now_booting;

static int parisc_max_cpus = 1;

static DEFINE_PER_CPU(spinlock_t, ipi_lock);

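/*
 * Each message type below occupies one bit in the per-CPU
 * cpuinfo_parisc.pending_ipi word (see ipi_send()), so at most
 * BITS_PER_LONG distinct IPI types are possible.
 */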
enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST,
#ifdef CONFIG_KGDB
	IPI_ENTER_KGDB,
#endif
};


/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (cpu_online(cpuid))
	{
		switch_to_idle_task(current);
	}

	return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
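/*
 * Mark ourselves offline, mask interrupts, and hand the processor back
 * to firmware: __pdc_cpu_rendezvous() should park this CPU in PDC's
 * rendezvous loop, and the for(;;) below catches it if it ever returns.
 */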
static void
halt_processor(void)
{
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	set_cpu_online(smp_processor_id(), false);
	local_irq_disable();
	__pdc_cpu_rendezvous();
	for (;;)
		;
}


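/*
 * IPI receive path: snapshot-and-clear this CPU's pending_ipi word
 * under the per-CPU ipi_lock, then act on each set bit.  Loop until
 * no new bits appear; interrupts are briefly re-enabled between
 * messages so other pending interrupts can get a look in.
 */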
irqreturn_t __irq_entry
ipi_interrupt(int irq, void *dev_id)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);
	unsigned long ops;
	unsigned long flags;

	for (;;) {
		spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);
		spin_lock_irqsave(lock, flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(lock, flags);

		mb(); /* Order bit clearing and data access. */

		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			ops &= ~(1 << which);

			switch (which) {
			case IPI_NOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_NOP\n", this_cpu);
				break;

			case IPI_RESCHEDULE:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				inc_irq_stat(irq_resched_count);
				scheduler_ipi();
				break;

			case IPI_CALL_FUNC:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				inc_irq_stat(irq_call_count);
				generic_smp_call_function_interrupt();
				break;

			case IPI_CPU_START:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				break;

			case IPI_CPU_STOP:
				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				halt_processor();
				break;

			case IPI_CPU_TEST:
				smp_debug(100, KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				break;
#ifdef CONFIG_KGDB
			case IPI_ENTER_KGDB:
				smp_debug(100, KERN_DEBUG "CPU%d ENTER_KGDB\n", this_cpu);
				kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				return IRQ_NONE;
			} /* Switch */

			/* let in any pending interrupts */
			local_irq_enable();
			local_irq_disable();
		} /* while (ops) */
	}
	return IRQ_HANDLED;
}


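/*
 * IPI send path: set the message bit in the target CPU's pending_ipi
 * word, then write the IPI EIR bit number to the target's HPA; that
 * I/O write is what actually raises the external interrupt there.
 */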
static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);
	spinlock_t *lock = &per_cpu(ipi_lock, cpu);
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	p->pending_ipi |= 1 << op;
	gsc_writel(IPI_IRQ - CPU_IRQ_BASE, p->hpa);
	spin_unlock_irqrestore(lock, flags);
}

static void
send_IPI_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	for_each_cpu(cpu, mask)
		ipi_send(cpu, op);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	BUG_ON(dest_cpu == NO_PROC_ID);

	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

#ifdef CONFIG_KGDB
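/* kgdb's hook for pulling every other CPU into the debugger. */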
void kgdb_roundup_cpus(void)
{
	send_IPI_allbutself(IPI_ENTER_KGDB);
}
#endif

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
	send_IPI_allbutself(IPI_NOP);
}

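/*
 * Arch hooks behind the generic cross-call API, e.g.
 *
 *	smp_call_function_single(cpu, func, info, 1);
 *
 * arrives on the target CPU as IPI_CALL_FUNC and is dispatched by
 * generic_smp_call_function_interrupt() in ipi_interrupt() above.
 */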
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC);
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum))	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	set_cpu_online(cpunum, true);

	/* Initialise the idle task for this CPU */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}


/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(unsigned long pdce_proc)
{
	int slave_id = cpu_now_booting;

#ifdef CONFIG_64BIT
	WARN_ON(((unsigned long)(PAGE0->mem_pdc_hi) << 32
			| PAGE0->mem_pdc) != pdce_proc);
#endif

	smp_cpu_init(slave_id);
	preempt_disable();

	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	local_irq_enable();  /* Interrupts have been off until now */

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Bring one cpu online.
 */
int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
	const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
	long timeout;

	task_thread_info(idle)->cpu = cpuid;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[], cpu_data[])
	*/
	cpu_now_booting = cpuid;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

	/*
	** This gets PDC to release the CPU from a very tight loop.
	**
	** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
	** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
	** is executed after receiving the rendezvous signal (an interrupt to
	** EIR{0}). MEM_RENDEZ is valid only when it is nonzero and the
	** contents of memory are valid."
	*/
	gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init().
	 * Once the "monarch CPU" sees the bit change, it can move on.
	 */
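	/* Poll every 100 usec; 10000 iterations bounds the wait at ~1 second. */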
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_online(cpuid)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}
	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
		cpuid, timeout * 100);
	return 0;
}

void __init smp_prepare_boot_cpu(void)
{
	int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;

	/* Setup BSP mappings */
	printk(KERN_INFO "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);

	set_cpu_online(bootstrap_processor, true);
	set_cpu_present(bootstrap_processor, true);
}


/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
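/*
 * Note: max_cpus == 0 here (e.g. "nosmp" on the command line) leaves
 * only the boot CPU running; with parisc_max_cpus set to 0, __cpu_up()
 * below will never call smp_boot_one_cpu() for a secondary.
 */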
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}


void smp_cpus_done(unsigned int cpu_max)
{
	return;
}


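/*
 * Generic hotplug entry point: kick the CPU and report whether it
 * actually came online.
 */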
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
		return -ENOSYS;

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

#ifdef CONFIG_PROC_FS
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif