/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

cpumask_t cpu_callin_map;		/* Bitmask of started secondaries */

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

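/*
 * Completions used to hand-shake with a starting secondary: cpu_starting
 * is completed once the secondary is alive and ready to sync counters,
 * cpu_running once it has marked itself online.
 */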
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical CPU mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

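/*
 * Record @cpu as a sibling of every already set up CPU that shares its
 * package and core (i.e. the other VPEs/TCs of the same core).
 */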
static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpu_data[cpu].package == cpu_data[i].package &&
			    cpu_data[cpu].core == cpu_data[i].core) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

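/*
 * Record @cpu as a core sibling of every already set up CPU that lives
 * in the same package (physical chip).
 */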
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpu_data[i].package == cpu_data[k].package &&
			    cpu_data[i].core == cpu_data[k].core)
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
}

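/* Platform-specific SMP operations, registered once by platform code. */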
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	preempt_disable();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_ONLINE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU. Be a bit slow here and
	 * set the bits for every online CPU so we don't miss
	 * any IPI whilst taking this VPE down.
	 */

	cpumask_copy(&cpu_foreign_map, cpu_online_mask);

	/* Make it visible to every other CPU */
	smp_mb();

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

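/* Ask every other online CPU to park itself in stop_this_cpu(). */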
void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

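/*
 * Boot secondary CPU @cpu and wait for it to come up: first until it
 * signals cpu_starting, then (after the counters have been synchronised)
 * until it has marked itself online and completed cpu_running.
 */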
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	mp_ops->boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

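/* Flush the entire TLB on every online CPU, including the caller. */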
void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

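/* As smp_on_other_tlbs(), but also run @func on the calling CPU. */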
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

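/* Argument block handed to the ranged/per-page TLB flush IPI handlers. */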
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

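/* Kernel mappings are shared, so flush the range on every online CPU. */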
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

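/*
 * Flush a single user page. As in flush_tlb_mm(), remote CPUs either get
 * an IPI (shared or foreign mm) or have their context for this mm
 * invalidated so a new one is allocated at switch_mm time.
 */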
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#if defined(CONFIG_KEXEC)
void (*dump_ipi_function_ptr)(void *) = NULL;
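/*
 * Used by the kexec/crash path: publish @dump_ipi_callback via
 * dump_ipi_function_ptr and send an SMP_DUMP IPI to every other online CPU.
 */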
void dump_send_ipi(void (*dump_ipi_callback)(void *))
{
	int i;
	int cpu = smp_processor_id();

	dump_ipi_function_ptr = dump_ipi_callback;
	smp_mb();
	for_each_online_cpu(i)
		if (i != cpu)
			mp_ops->send_ipi_single(i, SMP_DUMP);

}
EXPORT_SYMBOL(dump_send_ipi);
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

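/*
 * Deliver a broadcast tick to each CPU in @mask via an async IPI; the
 * per-cpu count ensures only one IPI is in flight per target CPU.
 */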
void tick_broadcast(const struct cpumask *mask)
{
	atomic_t *count;
	struct call_single_data *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		count = &per_cpu(tick_broadcast_count, cpu);
		csd = &per_cpu(tick_broadcast_csd, cpu);

		if (atomic_inc_return(count) == 1)
			smp_call_function_single_async(cpu, csd);
	}
}

static void tick_broadcast_callee(void *info)
{
	int cpu = smp_processor_id();
	tick_receive_broadcast();
	atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

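/* Point every CPU's broadcast CSD at tick_broadcast_callee at early boot. */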
static int __init tick_broadcast_init(void)
{
	struct call_single_data *csd;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		csd->func = tick_broadcast_callee;
	}

	return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */