/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/module.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/r4k-timer.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

volatile cpumask_t cpu_callin_map;      /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */

extern void cpu_idle(void);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

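/*
 * Record which CPUs share a physical core with @cpu.  Every CPU in
 * cpu_sibling_setup_map with a matching core number becomes a sibling
 * of @cpu (and vice versa); with a single TC per core, @cpu is simply
 * its own sibling.
 */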
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (cpu_data[cpu].core == cpu_data[i].core) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
                        }
                }
        } else
                cpu_set(cpu, cpu_sibling_map[cpu]);
}

struct plat_smp_ops *mp_ops;

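/*
 * Install the platform's SMP operations.  This is expected to happen
 * exactly once during platform setup; overriding a previously
 * registered set of ops is flagged because it is almost certainly
 * a bug.
 */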
__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                printk(KERN_WARNING "Overriding previously set SMP ops\n");

        mp_ops = ops;
}

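/*
 * For illustration only: a typical caller is a platform's early setup
 * code.  vsmp_smp_ops is the MT/VSMP implementation; which ops
 * structure to register depends entirely on the platform, and the
 * function name below is hypothetical:
 *
 *	void __init plat_smp_setup_example(void)
 *	{
 *		register_smp_ops(&vsmp_smp_ops);
 *	}
 */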
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage __cpuinit void start_secondary(void)
{
        unsigned int cpu;

#ifdef CONFIG_MIPS_MT_SMTC
        /* Only do cpu_probe for first TC of CPU */
        if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
#endif /* CONFIG_MIPS_MT_SMTC */
        cpu_probe();
        cpu_report();
        per_cpu_trap_init();
        mips_clockevent_init();
        mp_ops->init_secondary();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        notify_cpu_starting(cpu);

        mp_ops->smp_finish();
        set_cpu_sibling_map(cpu);

        /* Signal __cpu_up() on the boot CPU that we have arrived. */
        cpu_set(cpu, cpu_callin_map);

        synchronise_count_slave();

        cpu_idle();
}

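/*
 * Send a call-function IPI to every CPU in @mask on behalf of the
 * generic smp_call_function() machinery.
 */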
void arch_send_call_function_ipi(cpumask_t mask)
{
        mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

/*
 * We reuse the same vector for the single IPI
 */
void arch_send_call_function_single_ipi(int cpu)
{
        mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}

/*
 * Call into both interrupt handlers, as we share the IPI for them
 */
void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        generic_smp_call_function_interrupt();
        irq_exit();
}

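/*
 * Run on every other CPU by smp_send_stop(): drop out of the online
 * map and spin forever, idling in the wait instruction when the
 * platform provides one via cpu_wait.
 */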
static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */
        cpu_clear(smp_processor_id(), cpu_online_map);
        for (;;) {
                if (cpu_wait)
                        (*cpu_wait)();          /* Wait if available. */
        }
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

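/*
 * Final bring-up fixups, run on the boot CPU once all secondaries are
 * up: give the platform a chance to finish, then run the master side
 * of the count-register synchronisation.
 */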
void __init smp_cpus_done(unsigned int max_cpus)
{
        mp_ops->cpus_done();
        synchronise_count_master();
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
        cpu_present_map = cpu_possible_map;
#endif
}

/* preload SMP state for boot cpu */
void __devinit smp_prepare_boot_cpu(void)
{
        cpu_set(0, cpu_possible_map);
        cpu_set(0, cpu_online_map);
        cpu_set(0, cpu_callin_map);
}

/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is the
 * logical number; platform code translates it to the physical CPU via
 * __cpu_logical_map.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;

        /*
         * Processor goes to start_secondary(), sets online flag
         * The following code is purely to make sure
         * Linux can schedule processes on this slave.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("Fork failed for CPU %d", cpu);

        mp_ops->boot_secondary(cpu, idle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        cpu_set(cpu, cpu_online_map);

        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
#ifndef CONFIG_MIPS_MT_SMTC
        smp_call_function(func, info, 1);
#endif
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing.  For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus.  For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                /*
                 * Zap the mm's context on every other CPU so that
                 * switch_mm() has to allocate a fresh ASID if the mm
                 * ever runs there again (see the comment above).
                 */
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

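/*
 * Argument block passed to the TLB flush IPI handlers below: addr1 and
 * addr2 carry either a single address or a start/end pair, depending
 * on the handler.
 */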
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

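/*
 * Kernel mappings are shared by all CPUs, so there is no per-mm
 * context trick to play here; the range has to be flushed on every
 * online CPU.
 */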
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                cpumask_t mask = cpu_online_map;
                unsigned int cpu;

                cpu_clear(smp_processor_id(), mask);
                for_each_cpu_mask(cpu, mask)
                        if (cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

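/*
 * Flush a single virtual address from every TLB, including the local
 * one; the address itself is passed through the info pointer.
 */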
void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);