/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

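/* Slave-side measurement for the tick sync handshake below.  Each of
 * NUM_ITERS iterations timestamps the master's go[] response, keeps the
 * iteration with the smallest round-trip, and returns the estimated
 * offset of our tick from the master's (midpoint of t0/t1 minus the
 * master's timestamp).  The round-trip and master timestamps are
 * reported through *rt and *master.
 */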
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

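/* Master side of the handshake: kick the freshly booted cpu via
 * smp_start_sync_tick_client() and then feed it NUM_ROUNDS*NUM_ITERS
 * tick samples through the go[] array, holding itc_sync_lock so only
 * one slave is synchronized at a time.
 */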
static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
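/* Start a cpu in a sun4v logical domain: build an hvtramp_descr
 * describing the kernel image mappings, the cpu's trap_block fault_info
 * area and the initial thread register, then hand it to the hypervisor
 * via sun4v_cpu_start() with the hv_cpu_startup trampoline as the entry
 * point.  The descriptor is returned through *descrp so the caller can
 * free it once the cpu has called in.
 */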
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

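/* Spitfire/Starfire cross-call delivery: write the three mondo data
 * words plus the dispatch register for one target cpu with PSTATE_IE
 * cleared, then poll ASI_INTR_DISPATCH_STAT and retry if the target
 * NACKed the dispatch.
 */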
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |
		       (cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

#define CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define MONDO_USEC_WAIT_MIN		2
#define MONDO_USEC_WAIT_MAX		100
#define MONDO_RETRY_LIMIT		500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until all cpus have received it, or the cpus are truly stuck
 * unable to receive the mondo and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the hypervisor
 * to perform guest service, such as PCIe error handling.  Considering
 * that service time, an overall wait of 1 second is reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined here: 2 usec for a
 * quick turn-around on a single cpu and up to 100 usec for a large cpu
 * count.  Delivering mondos to a large number of cpus can take longer,
 * so we adjust the retry count as long as the target cpus are making
 * forward progress.
 */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If not these non-fatal errors, panic */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			     (status != HV_ECPUERROR) &&
			     (status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus that remain to be retried
		 * at the front - this simplifies tracking the truly stalled
		 * cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retrying usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in an error
		 * state, and it's usually safe to skip the faulty cpu and
		 * retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain, perhaps it was offlined, which is unexpected but
		 * not fatal, and it's okay to skip the offlined cpu.
		 */
		rem = 0;
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			cpu = cpu_list[i];
			if (likely(cpu == 0xffff)) {
				n_sent++;
			} else if ((status == HV_ECPUERROR) &&
				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
				ecpuerror_id = cpu + 1;
			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
				enocpu_id = cpu + 1;
			} else {
				cpu_list[rem++] = cpu;
			}
		}

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* Retry count is for no progress. If we're making progress,
		 * reset the retry count.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
		if (!mondo_delivered)
			udelay(usec_wait);

		retries++;
	} while (1);

xcall_done:
	if (unlikely(ecpuerror_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
			this_cpu, ecpuerror_id - 1);
	} else if (unlikely(enocpu_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
			this_cpu, enocpu_id - 1);
	}
	return;

fatal_errors:
	/* fatal errors include bad alignment, etc */
	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
		this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
		this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
	panic("SUN4V mondo timeout panic\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

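/* Common xcall entry point: stuff the data words into this cpu's mondo
 * block, build the target cpu list (skipping ourselves and offline
 * cpus), and hand off to the chip-specific delivery routine.
 */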
static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

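/* Flush the D-cache copy of @page on cpu @cpu.  No flush is needed on
 * sun4v (tlb_type == hypervisor); otherwise the flush is done locally
 * or pushed to the remote cpu via the chip-specific dcache flush xcall.
 */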
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * mm->cpu_vm_mask is a bit mask of which cpus an address
 * space has (potentially) executed on, this is the heuristic
 * we use to limit cross calls.
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;

	get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
			       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);

	get_cpu();

	smp_cross_call_masked(&xcall_flush_tlb_page,
			      context, vaddr, 0,
			      mm_cpumask(mm));

	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

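/* smp_capture() corrals every other online cpu into the "jailcell"
 * handler below and waits until they have all checked in via
 * smp_capture_registry; smp_release() lets them go again.  The capture
 * depth counter makes nested capture/release pairs safe.
 */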
void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

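/* Build the topology masks from the per-cpu data: cpu_core_map groups
 * cpus sharing a core_id, cpu_core_sib_map groups cpus on the same
 * socket (sock_id), and cpu_sibling_map groups hardware threads with
 * the same proc_id.
 */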
void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		for_each_present_cpu(j) {
			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
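/* Final code run by a cpu going offline: unconfigure its mondo queues
 * on sun4v, drop out of smp_commenced_mask and spin with interrupts
 * disabled until the cpu is stopped or reset.
 */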
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

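/* Populate the kernel page tables for one page of the percpu area,
 * allocating any missing PUD/PMD/PTE levels from bootmem.  Used by
 * pcpu_page_first_chunk() below when the embed allocator is not used.
 */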
static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}