/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/kgdb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/setup.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>
#include <asm/pcr.h>

#include "cpumap.h"
#include "kernel.h"

DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = CPU_MASK_NONE };

cpumask_t cpu_core_sib_cache_map[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS - 1] = CPU_MASK_NONE };

EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);
EXPORT_SYMBOL(cpu_core_sib_map);
EXPORT_SYMBOL(cpu_core_sib_cache_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush  %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* inform the notifiers about the new cpu */
	notify_cpu_starting(cpuid);

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		rmb();

	set_cpu_online(cpuid, true);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();

	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */
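
/* In outline, the handshake implemented below (a sketch of the protocol,
 * not additional kernel code); master and slave rendezvous through the
 * two cache-line separated words of go[]:
 *
 *	slave (get_delta)		master (smp_synchronize_one_tick)
 *	-----------------		---------------------------------
 *	t0 = get_tick();
 *	go[MASTER] = 1;       ---->	while (!go[MASTER]) rmb();
 *					go[MASTER] = 0;
 *					go[SLAVE] = get_tick();
 *	tm = go[SLAVE];       <----
 *	go[SLAVE] = 0;
 *	t1 = get_tick();
 *
 * The slave keeps the (t0, t1, tm) triple with the smallest roundtrip
 * t1 - t0, since that bounds the error on the master's timestamp most
 * tightly.
 */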

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_RAW_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0

static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_safe("#StoreLoad");
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}
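
/* Worked example of the overflow-safe midpoint above (hypothetical
 * values): with best_t0 = 0xfffffffffffffffe and best_t1 =
 * 0xffffffffffffffff, the naive (best_t0 + best_t1)/2 wraps and yields
 * 0x7ffffffffffffffe, while best_t0/2 + best_t1/2 = 0xfffffffffffffffe
 * is the correctly truncated midpoint; the "tcenter++" correction only
 * fires when both halves drop a remainder, i.e. both values are odd.
 */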

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0)
				done = 1;	/* let's lock on to this... */

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_safe("#StoreLoad");

	raw_spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_safe("#StoreLoad");
		}
	}
	raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
				void **descrp)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}
	*descrp = hdesc;

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte   = tte_data;
		tte_vaddr += 0x400000;
		tte_data  += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
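
/* In other words (a sketch of the startup side, which lives in the
 * trampoline assembly, not here): the cpu being booted receives the
 * 32-bit-safe address of cpu_new_thread as its cookie and performs the
 * equivalent of
 *
 *	struct thread_info *ti = *(struct thread_info **)cookie;
 *
 * so the full 64-bit thread pointer never has to pass through the
 * firmware interface itself.
 */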

static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	void *descr = NULL;
	int timeout, ret;

	callin_flag = 0;
	cpu_new_thread = task_thread_info(idle);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread,
					    &descr);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->phandle, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	kfree(descr);

	return ret;
}

static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
			((cpu & 0x40) >> 4) |
			(cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr	%1, %2, %%pstate\n\t"
	"stxa	%4, [%0] %3\n\t"
	"stxa	%5, [%0+%8] %3\n\t"
	"add	%0, %8, %0\n\t"
	"stxa	%6, [%0+%8] %3\n\t"
	"membar	#Sync\n\t"
	"stxa	%%g0, [%7] %3\n\t"
	"membar	#Sync\n\t"
	"mov	0x20, %%g1\n\t"
	"ldxa	[%%g1] 0x7f, %%g0\n\t"
	"membar	#Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016llx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	u64 *mondo, data0, data1, data2;
	u16 *cpu_list;
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);
	data0 = mondo[0];
	data1 = mondo[1];
	data2 = mondo[2];
	for (i = 0; i < cnt; i++)
		spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
}

/* Cheetah now allows sending the whole 64 bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (i.e. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int nack_busy_id, is_jbus, need_more;
	u64 *mondo, pstate, ver, busy_mask;
	u16 *cpu_list;

	cpu_list = __va(tb->cpu_list_pa);
	mondo = __va(tb->cpu_mondo_block_pa);

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa	%0, [%3] %6\n\t"
			     "stxa	%1, [%4] %6\n\t"
			     "stxa	%2, [%5] %6\n\t"
			     "membar	#Sync\n\t"
			     : /* no outputs */
			     : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for (i = 0; i < cnt; i++) {
			u64 target, nr;

			nr = cpu_list[i];
			if (nr == 0xffff)
				continue;

			target = (nr << 14) | 0x70;
			if (is_jbus) {
				busy_mask |= (0x1UL << (nr * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa	%%g0, [%0] %1\n\t"
				"membar	#Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, this_cnt = 0;
					for (i = 0; i < cnt; i++) {
						if (cpu_list[i] == 0xffff)
							continue;
						cpu_list[i] = 0xffff;
						this_cnt++;
						if (this_cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016llx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for (i = 0; i < cnt; i++) {
				u64 check_mask, nr;

				nr = cpu_list[i];
				if (nr == 0xffff)
					continue;

				if (is_jbus)
					check_mask = (0x2UL << (2*nr));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_list[i] = 0xffff;
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

#define	CPU_MONDO_COUNTER(cpuid)	(cpu_mondo_counter[cpuid])
#define	MONDO_USEC_WAIT_MIN		2
#define	MONDO_USEC_WAIT_MAX		100
#define	MONDO_RETRY_LIMIT		500000

/* Multi-cpu list version.
 *
 * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
 * Sometimes not all cpus receive the mondo, requiring us to re-send
 * it until every cpu has received it, or until cpus are truly stuck
 * and unable to receive the mondo, and we time out.
 * Occasionally a target cpu strand is borrowed briefly by the hypervisor
 * to perform a guest service, such as PCIe error handling.  Considering
 * that service time, an overall wait of 1 second is reasonable for 1 cpu.
 * Two in-between mondo check wait times are defined here: 2 usec for a
 * quick turnaround on a single cpu, and up to 100 usec for large cpu
 * counts.  Delivering the mondo to a large number of cpus can take
 * longer, so we reset the retry count as long as the target cpus are
 * making forward progress.
 */
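
/* For example (hypothetical counts): with cnt = 4 targets the inter-retry
 * wait is 4 * MONDO_USEC_WAIT_MIN = 8 usec, while cnt = 512 is capped at
 * MONDO_USEC_WAIT_MAX = 100 usec; combined with MONDO_RETRY_LIMIT, a
 * single completely stuck cpu times out after roughly 500000 * 2 usec,
 * i.e. on the order of one second of no forward progress.
 */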
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
	int this_cpu, tot_cpus, prev_sent, i, rem;
	int usec_wait, retries, tot_retries;
	u16 first_cpu = 0xffff;
	unsigned long xc_rcvd = 0;
	unsigned long status;
	int ecpuerror_id = 0;
	int enocpu_id = 0;
	u16 *cpu_list;
	u16 cpu;

	this_cpu = smp_processor_id();
	cpu_list = __va(tb->cpu_list_pa);
	usec_wait = cnt * MONDO_USEC_WAIT_MIN;
	if (usec_wait > MONDO_USEC_WAIT_MAX)
		usec_wait = MONDO_USEC_WAIT_MAX;
	retries = tot_retries = 0;
	tot_cpus = cnt;
	prev_sent = 0;

	do {
		int n_sent, mondo_delivered, target_cpu_busy;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done.  */
		if (likely(status == HV_EOK))
			goto xcall_done;

		/* If the error is not one of these non-fatal ones, panic. */
		if (unlikely((status != HV_EWOULDBLOCK) &&
			(status != HV_ECPUERROR) &&
			(status != HV_ENOCPU)))
			goto fatal_errors;

		/* First, see if we made any forward progress.
		 *
		 * Go through the cpu_list, count the target cpus that have
		 * received our mondo (n_sent), and those that did not (rem).
		 * Re-pack cpu_list with the cpus that remain to be retried
		 * at the front - this simplifies tracking the truly stalled
		 * cpus.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 *
		 * EWOULDBLOCK means some target cpus did not receive the
		 * mondo and retrying usually helps.
		 *
		 * ECPUERROR means at least one target cpu is in an error
		 * state; it's usually safe to skip the faulty cpu and retry.
		 *
		 * ENOCPU means one of the target cpus doesn't belong to the
		 * domain, perhaps offlined, which is unexpected but not
		 * fatal, and it's okay to skip the offlined cpu.
		 */
		rem = 0;
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			cpu = cpu_list[i];
			if (likely(cpu == 0xffff)) {
				n_sent++;
			} else if ((status == HV_ECPUERROR) &&
				(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
				ecpuerror_id = cpu + 1;
			} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
				enocpu_id = cpu + 1;
			} else {
				cpu_list[rem++] = cpu;
			}
		}

		/* No cpu remained, we're done. */
		if (rem == 0)
			break;

		/* Otherwise, update the cpu count for retry. */
		cnt = rem;

		/* Record the overall number of mondos received by the
		 * first of the remaining cpus.
		 */
		if (first_cpu != cpu_list[0]) {
			first_cpu = cpu_list[0];
			xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
		}

		/* Was any mondo delivered successfully? */
		mondo_delivered = (n_sent > prev_sent);
		prev_sent = n_sent;

		/* or, was any target cpu busy processing other mondos? */
		target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
		xc_rcvd = CPU_MONDO_COUNTER(first_cpu);

		/* Retry count is for no progress. If we're making progress,
		 * reset the retry count.
		 */
		if (likely(mondo_delivered || target_cpu_busy)) {
			tot_retries += retries;
			retries = 0;
		} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
			goto fatal_mondo_timeout;
		}

		/* Delay a little bit to let other cpus catch up on
		 * their cpu mondo queue work.
		 */
		if (!mondo_delivered)
			udelay(usec_wait);

		retries++;
	} while (1);

xcall_done:
	if (unlikely(ecpuerror_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
		       this_cpu, ecpuerror_id - 1);
	} else if (unlikely(enocpu_id > 0)) {
		pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
		       this_cpu, enocpu_id - 1);
	}
	return;

fatal_errors:
	/* fatal errors include bad alignment, etc */
	pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
	       this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
	panic("Unexpected SUN4V mondo error %lu\n", status);

fatal_mondo_timeout:
	/* some cpus being non-responsive to the cpu mondo */
	pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
	       this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
	panic("SUN4V mondo timeout panic\n");
}

static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);

static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
{
	struct trap_per_cpu *tb;
	int this_cpu, i, cnt;
	unsigned long flags;
	u16 *cpu_list;
	u64 *mondo;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list.  */
	cnt = 0;
	for_each_cpu(i, mask) {
		if (i == this_cpu || !cpu_online(i))
			continue;
		cpu_list[cnt++] = i;
	}

	if (cnt)
		xcall_deliver_impl(tb, cnt);

	local_irq_restore(flags);
}

/* Send cross call to all processors mentioned in MASK_P
 * except self.  Really, there are only two cases currently,
 * "cpu_online_mask" and "mm_cpumask(mm)".
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));

	xcall_deliver(data0, data1, data2, mask);
}

/* Send cross call to all processors except self. */
static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
{
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask);
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	xcall_deliver((u64) &xcall_sync_tick, 0, 0,
		      cpumask_of(cpu));
}

extern unsigned long xcall_call_function;

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
}

extern unsigned long xcall_call_function_single;

void arch_send_call_function_single_ipi(int cpu)
{
	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
		      cpumask_of(cpu));
}

void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_interrupt();
	irq_exit();
}

void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	irq_enter();
	generic_smp_call_function_single_interrupt();
	irq_exit();
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_page;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
extern unsigned long xcall_fetch_glob_pmu_n4;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0 = 0;

		if (tlb_type == spitfire) {
			data0 = ((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =	((u64)&xcall_flush_dcache_page_cheetah);
#endif
		}
		if (data0) {
			xcall_deliver(data0, __pa(pg_addr),
				      (u64) pg_addr, cpumask_of(cpu));
#ifdef CONFIG_DEBUG_DCFLUSH
			atomic_inc(&dcpage_flushes_xcall);
#endif
		}
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr;
	u64 data0;

	if (tlb_type == hypervisor)
		return;

	preempt_disable();

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	data0 = 0;
	pg_addr = page_address(page);
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
#endif
	}
	if (data0) {
		xcall_deliver(data0, __pa(pg_addr),
			      (u64) pg_addr, cpu_online_mask);
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}
	__local_flush_dcache_page(page);

	preempt_enable();
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}

void smp_fetch_global_pmu(void)
{
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0);
	else
		smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (i.e. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */
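
/* Schematically, the pattern that point (2) above enables in the flush
 * routines below (a sketch, not additional kernel code):
 *
 *	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
 *		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
 *	else
 *		smp_cross_call_masked(..., mm_cpumask(mm));
 *	... then flush locally in all cases ...
 *
 * Trimming cpu_vm_mask down to the current cpu is safe because any other
 * cpu that later runs this mm will find its bit clear and flush the
 * context from its local tlb before using it.
 */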

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm_cpumask(mm));

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

struct tlb_pending_info {
	unsigned long ctx;
	unsigned long nr;
	unsigned long *vaddrs;
};

static void tlb_pending_func(void *info)
{
	struct tlb_pending_info *t = info;

	__flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	struct tlb_pending_info info;
	int cpu = get_cpu();

	info.ctx = ctx;
	info.nr = nr;
	info.vaddrs = vaddrs;

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
				       &info, 1);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
{
	unsigned long context = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
		cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
	else
		smp_cross_call_masked(&xcall_flush_tlb_page,
				      context, vaddr, 0,
				      mm_cpumask(mm));
	__flush_tlb_page(context, vaddr);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end    = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_return(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_safe("#StoreLoad");
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
 * set, so they can service tlb flush xcalls...
 */
extern void prom_world(int);

void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_safe("#StoreLoad");
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void smp_prepare_boot_cpu(void)
{
}

void __init smp_setup_processor_id(void)
{
	if (tlb_type == spitfire)
		xcall_deliver_impl = spitfire_xcall_deliver;
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		xcall_deliver_impl = cheetah_xcall_deliver;
	else
		xcall_deliver_impl = hypervisor_xcall_deliver;
}

void __init smp_fill_in_cpu_possible_map(void)
{
	int possible_cpus = num_possible_cpus();
	int i;

	if (possible_cpus > nr_cpu_ids)
		possible_cpus = nr_cpu_ids;

	for (i = 0; i < possible_cpus; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);
}

void smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpumask_set_cpu(i, &cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpumask_set_cpu(j, &cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i)  {
		unsigned int j;

		for_each_present_cpu(j)  {
			if (cpu_data(i).max_cache_id ==
			    cpu_data(j).max_cache_id)
				cpumask_set_cpu(j, &cpu_core_sib_cache_map[i]);

			if (cpu_data(i).sock_id == cpu_data(j).sock_id)
				cpumask_set_cpu(j, &cpu_core_sib_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpumask_clear(&per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = smp_boot_one_cpu(cpu, tidle);

	if (!ret) {
		cpumask_set_cpu(cpu, &smp_commenced_mask);
		while (!cpu_online(cpu))
			mb();
		if (!cpu_online(cpu)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpumask_clear_cpu(cpu, &smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr	%%pstate, %0\n\t"
		"wrpr	%0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu(i, &cpu_core_map[cpu])
		cpumask_clear_cpu(cpu, &cpu_core_map[i]);
	cpumask_clear(&cpu_core_map[cpu]);

	for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
		cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
	cpumask_clear(&per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	smp_wmb();

	/* Make sure no interrupts point to this cpu.  */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	set_cpu_online(cpu, false);

	cpu_map_rebuild();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpumask_test_cpu(cpu, &smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpumask_test_cpu(cpu, &smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				set_cpu_present(cpu, false);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	if (cpu == smp_processor_id()) {
		WARN_ON_ONCE(preemptible());
		set_softint(1 << PIL_SMP_RECEIVE_SIGNAL);
	} else {
		xcall_deliver((u64) &xcall_receive_signal,
			      0, 0, cpumask_of(cpu));
	}
}

void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
	scheduler_ipi();
}

static void stop_this_cpu(void *dummy)
{
	prom_stopself();
}

void smp_send_stop(void)
{
	int cpu;

	if (tlb_type == hypervisor) {
		int this_cpu = smp_processor_id();
#ifdef CONFIG_SERIAL_SUNHV
		sunhv_migrate_hvcons_irq(this_cpu);
#endif
		for_each_online_cpu(cpu) {
			if (cpu == this_cpu)
				continue;
#ifdef CONFIG_SUN_LDOMS
			if (ldom_domaining_enabled) {
				unsigned long hv_err;
				hv_err = sun4v_cpu_stop(cpu);
				if (hv_err)
					printk(KERN_ERR "sun4v_cpu_stop() "
					       "failed err=%lu\n", hv_err);
			} else
#endif
				prom_stopcpu_cpuid(cpu);
		}
	} else
		smp_call_function(stop_this_cpu, NULL, 0);
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
					size_t align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node(NODE_DATA(node),
					   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem(size, align, goal);
#endif
}

static void __init pcpu_free_bootmem(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (cpu_to_node(from) == cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

static void __init pcpu_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd)) {
		pud_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pgd_populate(&init_mm, pgd, new);
	}

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, new);
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *new;

		new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, new);
	}
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc = -EINVAL;

	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					    PERCPU_DYNAMIC_RESERVE, 4 << 20,
					    pcpu_cpu_distance,
					    pcpu_alloc_bootmem,
					    pcpu_free_bootmem);
		if (rc)
			pr_warning("PERCPU: %s allocator failed (%d), "
				   "falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					   pcpu_alloc_bootmem,
					   pcpu_free_bootmem,
					   pcpu_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];

	/* Setup %g5 for the boot cpu.  */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());

	of_fill_in_cpu_data();
	if (tlb_type == hypervisor)
		mdesc_fill_in_cpu_data(cpu_all_mask);
}