/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 * the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com> Do loops_per_jiffy
 * calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 * & cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *  scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *		smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/*
 * Note: alignment of 4 entries/cacheline was empirically determined
 * to be a good tradeoff between hot cachelines & spreading the array
 * across too many cachelines.
 */
static struct local_tlb_flush_counts {
	unsigned int count;
} __attribute__((__aligned__(32))) local_tlb_flush_counts[NR_CPUS];

static DEFINE_PER_CPU(unsigned short, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;

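/*
 * Inter-processor interrupts are multiplexed through a single interrupt
 * vector: each of the constants below is a bit number in the per-CPU
 * "ipi_operation" word.  A sender sets the bit for the requested
 * operation and then raises IA64_IPI_VECTOR on the target CPU;
 * handle_IPI() decodes the word and runs the matching handler.
 */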
#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1
#define IPI_CALL_FUNC_SINGLE	2
#define IPI_KDUMP_CPU_STOP	3

/* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);

extern void cpu_halt (void);

static void
stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

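/*
 * IPI entry point.  Atomically fetch and clear this CPU's pending
 * "ipi_operation" bits with xchg(), then dispatch each set bit to the
 * corresponding IPI_* handler.  The outer loop re-reads the word so
 * that operations posted while earlier ones were being handled are
 * picked up before we return.
 */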
irqreturn_t
handle_IPI (int irq, void *dev_id)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CPU_STOP:
				stop_this_cpu();
				break;
			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;
			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;
#ifdef CONFIG_KEXEC
			case IPI_KDUMP_CPU_STOP:
				unw_init_running(kdump_cpu_freeze, NULL);
				break;
#endif
			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
						this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}

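/*
 * The sending side of the mechanism above: post the operation bit in
 * the destination CPU's "ipi_operation" word, then deliver the actual
 * interrupt through the machine vector layer.
 */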
/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_mask(cpumask_t mask, int op)
{
	unsigned int cpu;

	for_each_cpu_mask(cpu, mask) {
		send_IPI_single(cpu, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for_each_online_cpu(i) {
		send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

#ifdef CONFIG_KEXEC
void
kdump_smp_send_stop(void)
{
	send_IPI_allbutself(IPI_KDUMP_CPU_STOP);
}

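/*
 * Send an INIT IPI to every other online CPU whose kdump_status slot is
 * still zero, i.e. (presumably) the CPUs that have not yet responded to
 * IPI_KDUMP_CPU_STOP, as a stronger means of bringing them to a halt.
 */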
void
kdump_smp_send_init(void)
{
	unsigned int cpu, self_cpu;

	self_cpu = smp_processor_id();
	for_each_online_cpu(cpu) {
		if (cpu != self_cpu) {
			if (kdump_status[cpu] == 0)
				platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
		}
	}
}
#endif

/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static void
smp_send_local_flush_tlb (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}

void
smp_local_flush_tlb(void)
{
	/*
	 * Use atomic ops. Otherwise, the load/increment/store sequence from
	 * a "++" operation can have the line stolen between the load & store.
	 * The overhead of the atomic op is negligible in this case & offers
	 * significant benefit for the brief periods where lots of cpus
	 * are simultaneously flushing TLBs.
	 */
	ia64_fetchadd(1, &local_tlb_flush_counts[smp_processor_id()].count, acq);
	local_flush_tlb_all();
}

#define FLUSH_DELAY	5 /* Usec backoff to eliminate excessive cacheline bouncing */

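/*
 * Flush the local TLB on every CPU in @xcpumask.  A snapshot of each
 * target's local_tlb_flush_counts value is taken first; after the IPIs
 * are sent we spin, backing off FLUSH_DELAY usecs at a time, until each
 * counter has moved past its snapshot, which proves the remote flush
 * has actually run.
 */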
void
smp_flush_tlb_cpumask(cpumask_t xcpumask)
{
	unsigned short *counts = __ia64_per_cpu_var(shadow_flush_counts);
	cpumask_t cpumask = xcpumask;
	int mycpu, cpu, flush_mycpu = 0;

	preempt_disable();
	mycpu = smp_processor_id();

	for_each_cpu_mask(cpu, cpumask)
		counts[cpu] = local_tlb_flush_counts[cpu].count & 0xffff;

	mb();
	for_each_cpu_mask(cpu, cpumask) {
		if (cpu == mycpu)
			flush_mycpu = 1;
		else
			smp_send_local_flush_tlb(cpu);
	}

	if (flush_mycpu)
		smp_local_flush_tlb();

	for_each_cpu_mask(cpu, cpumask)
		while (counts[cpu] == (local_tlb_flush_counts[cpu].count & 0xffff))
			udelay(FLUSH_DELAY);

	preempt_enable();
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
}

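/*
 * Flush all TLB entries for an address space.  The single-user case
 * (e.g. the parent side of a fork()) can be handled entirely on the
 * local CPU; otherwise every CPU is asked to run
 * local_finish_flush_tlb_mm() on the mm.
 */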
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	preempt_disable();
	/* this happens for the common case of a single-threaded fork():  */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		preempt_enable();
		return;
	}

	preempt_enable();
	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	send_IPI_mask(mask, IPI_CALL_FUNC);
}

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}