/* arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kdebug.h>

#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/head.h>
#include <asm/prom.h>
#include <asm/memctrl.h>

#include "entry.h"
#include "kstack.h"

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};

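/* Note: on sun4v (tlb_type == hypervisor) the privileged trap level is
 * limited to 2, so only the first two trapstack entries carry valid
 * state; on sun4u we dump all four levels.
 */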
static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i, limit;

	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
	       "dumping trap stack.\n", p->tl);

	limit = (tlb_type == hypervisor) ? 2 : 4;
	for (i = 0; i < limit; i++) {
		printk(KERN_EMERG
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
	}
}

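/* Hardware trap types arrive as lvl < 0x100 and are always fatal at
 * tl0; software traps are delivered biased by 0x100, so the bias is
 * stripped before the trap number is reported or signalled.
 */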
void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[32];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif

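/* A memory controller driver may register one callback that turns an
 * ECC syndrome code plus physical address into a DIMM label.  With no
 * handler registered, spitfire falls back to the PROM get-unumber
 * service and everything else reports -ENODEV.
 */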
static DEFINE_SPINLOCK(dimm_handler_lock);
static dimm_printer_t dimm_handler;

static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
{
	unsigned long flags;
	int ret = -ENODEV;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (dimm_handler) {
		ret = dimm_handler(synd_code, paddr, buf, buflen);
	} else if (tlb_type == spitfire) {
		if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
			ret = -EINVAL;
		else
			ret = 0;
	} else
		ret = -ENODEV;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);

	return ret;
}

int register_dimm_printer(dimm_printer_t func)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (!dimm_handler)
		dimm_handler = func;
	else
		ret = -EEXIST;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(register_dimm_printer);

void unregister_dimm_printer(dimm_printer_t func)
{
	unsigned long flags;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (dimm_handler == func)
		dimm_handler = NULL;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);
}
EXPORT_SYMBOL_GPL(unregister_dimm_printer);

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

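/* sun4v MMU faults hand us a packed word: the fault type lives in the
 * upper half (truncated to 16 bits below) and the MMU context in the
 * low 16 bits.
 */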
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}

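/* Privileged data faults are first run past the kernel exception
 * tables: a fault inside a uaccess helper resumes at its recorded
 * fixup address rather than bringing the kernel down.
 */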
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}

#ifdef CONFIG_PCI
#include "pci_impl.h"
#endif

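/* A note, assuming spitfire's 16K L1 caches with 32-byte lines:
 * writing a zero tag at every 32-byte offset across 2 * PAGE_SIZE
 * (16K with 8K sparc64 pages) invalidates each line of both the I-
 * and D-cache before the LSU store below turns them back on.
 */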
/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

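/* Maps the low 8 bits of a UDB error register onto the syndrome code
 * that sprintf_dimm()/prom_getunumber() expect when naming a DIMM.
 */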
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

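/* The low-level trap handler packs the AFSR, the trap type, a TL>1
 * flag and both UDB error registers into a single status word; unpack
 * it before dispatching to the CEE/UE loggers.
 */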
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}

int cheetah_pcache_forced_on;

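/* Force-enable the P-cache (prefetch cache).  DCU_PE turns the
 * P-cache on; HPE/SPE/SL enable the prefetch paths that feed it
 * (hardware prefetch, software prefetch and, presumably, second-load
 * steering).
 */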
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

struct cheetah_err_info *cheetah_error_log;

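/* The scoreboard holds two cheetah_err_info slots per cpu: slot 0 logs
 * traps taken at TL0, slot 1 logs traps taken at TL>0 (CHAFSR_TL1 set
 * in the AFSR).
 */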
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

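/* Boot-time setup for cheetah error handling: probe the largest
 * E-cache and smallest line size across cpus, reserve a physically
 * contiguous span twice the largest E-cache for displacement flushing,
 * allocate the per-cpu error scoreboard, and patch the 8-instruction
 * trap table entries over to the cheetah-specific vectors.
 */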
void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

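/* Displacement-flush a single line: map the physical address into the
 * flush span and also touch the alias half a span away.  With a
 * direct-mapped E-cache (and a span of twice the cache size) both
 * probes index the victim's set, forcing the line out.
 */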
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32-bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		/* sprintf_dimm() returns 0 on success or a negative errno. */
		if (ret >= 0)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret >= 0)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

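/* Read the AFSR, snapshot AFSR/AFAR into *logp when an error bit is
 * set, then write the value back (the AFSR error bits are W1C, so this
 * clears what was logged).  Returns non-zero if new errors were found.
 */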
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

cheetah_deferred_handler(struct pt_regs * regs,unsigned long afsr,unsigned long afar)1531 void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
1532 {
1533 	struct cheetah_err_info local_snapshot, *p;
1534 	int recoverable, is_memory;
1535 
1536 #ifdef CONFIG_PCI
1537 	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find an
	 *    exception table entry (i.e. we must have been accessing
	 *    user space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try to continue.
	 */
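	/* "Yanking" the page works by taking an extra reference on it
	 * with get_page() below, so the allocator will never hand the
	 * corrupted frame out again.
	 */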
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;
			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}

/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:	0=dcache,1=icache
 * Bit1:	0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.
 */
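/* So, for example, type 0x0 is a recoverable D-cache parity error
 * and type 0x3 is an unrecoverable I-cache parity error.
 */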
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}

struct sun4v_error_entry {
	u64		err_handle;
	u64		err_stick;

	u32		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_WARNING_RES	4

	u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_USER_MODE	0x01000000
#define SUN4V_ERR_ATTRS_PRIV_MODE	0x02000000
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

	u64		err_raddr;
	u32		err_size;
	u16		err_cpu;
	u16		err_pad;
};
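/* This structure mirrors the error report entries the sun4v
 * hypervisor places on the per-cpu resumable and non-resumable
 * queues; err_handle identifies the entry, and clearing it in the
 * handlers below releases the slot for reuse.
 */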

static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);

static const char *sun4v_err_type_to_str(u32 type)
{
	switch (type) {
	case SUN4V_ERR_TYPE_UNDEFINED:
		return "undefined";
	case SUN4V_ERR_TYPE_UNCORRECTED_RES:
		return "uncorrected resumable";
	case SUN4V_ERR_TYPE_PRECISE_NONRES:
		return "precise nonresumable";
	case SUN4V_ERR_TYPE_DEFERRED_NONRES:
		return "deferred nonresumable";
	case SUN4V_ERR_TYPE_WARNING_RES:
		return "warning resumable";
	default:
		return "unknown";
	}
}

static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n",
	       pfx,
	       ent->err_handle, ent->err_stick,
	       ent->err_type,
	       sun4v_err_type_to_str(ent->err_type));
	printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n",
	       pfx,
	       ent->err_attrs,
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ?
		"processor" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ?
		"memory" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ?
		"pio" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ?
		"integer-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ?
		"fpu-regs" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ?
		"user" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ?
		"privileged" : ""),
	       ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ?
		"queue-full" : ""));
	printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n",
	       pfx,
	       ent->err_raddr, ent->err_size, ent->err_cpu);

	show_regs(regs);

	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	if (local_copy.err_type == SUN4V_ERR_TYPE_WARNING_RES) {
		/* If err_type is 0x4, it's a powerdown request.  Do
		 * not do the usual resumable error log because that
		 * makes it look like some abnormal error.  Note that
		 * we test the local copy: the queue entry itself was
		 * released above and may already have been reused.
		 */
		printk(KERN_INFO "Power down request...\n");
		kill_cad_pid(SIGINT, 1);
		return;
	}

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}

/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above.
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}

unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;

void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	prom_halt();
}

unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;

void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	prom_halt();
}

void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}

void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}

void do_fpe_common(struct pt_regs *regs)
{
	if (regs->tstate & TSTATE_PRIV) {
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
	} else {
		unsigned long fsr = current_thread_info()->xfsr[0];
		siginfo_t info;

		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *)regs->tpc;
		info.si_trapno = 0;
		info.si_code = __SI_FAULT;
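		/* Bits 16:14 of the FSR hold the floating-point trap
		 * type (ftt); ftt == 1 is an IEEE_754_exception, in
		 * which case the cexc field in bits 4:0 identifies the
		 * condition: invalid (0x10), overflow (0x08),
		 * underflow (0x04), division-by-zero (0x02) or
		 * inexact (0x01).
		 */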
		if ((fsr & 0x1c000) == (1 << 14)) {
			if (fsr & 0x10)
				info.si_code = FPE_FLTINV;
			else if (fsr & 0x08)
				info.si_code = FPE_FLTOVF;
			else if (fsr & 0x04)
				info.si_code = FPE_FLTUND;
			else if (fsr & 0x02)
				info.si_code = FPE_FLTDIV;
			else if (fsr & 0x01)
				info.si_code = FPE_FLTRES;
		}
		force_sig_info(SIGFPE, &info, current);
	}
}

void do_fpieee(struct pt_regs *regs)
{
	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
		       0, 0x24, SIGFPE) == NOTIFY_STOP)
		return;

	do_fpe_common(regs);
}

extern int do_mathemu(struct pt_regs *, struct fpustate *);

void do_fpother(struct pt_regs *regs)
{
	struct fpustate *f = FPUSTATE;
	int ret = 0;

	if (notify_die(DIE_TRAP, "fpu exception other", regs,
		       0, 0x25, SIGFPE) == NOTIFY_STOP)
		return;

	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
	case (2 << 14): /* unfinished_FPop */
	case (3 << 14): /* unimplemented_FPop */
		ret = do_mathemu(regs, f);
		break;
	}
	if (ret)
		return;
	do_fpe_common(regs);
}

void do_tof(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
		       0, 0x26, SIGEMT) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("Penguin overflow trap from kernel mode", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGEMT;
	info.si_errno = 0;
	info.si_code = EMT_TAGOVF;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGEMT, &info, current);
}

void do_div0(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "integer division by zero", regs,
		       0, 0x28, SIGFPE) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV)
		die_if_kernel("TL0: Kernel divide by zero.", regs);
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = FPE_INTDIV;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGFPE, &info, current);
}

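/* Dump the three instructions before the faulting one, the faulting
 * instruction itself bracketed in '<...>', and the five instructions
 * after it.
 */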
static void instruction_dump(unsigned int *pc)
{
	int i;

	if ((((unsigned long) pc) & 3))
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++)
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	printk("\n");
}

static void user_instruction_dump(unsigned int __user *pc)
{
	int i;
	unsigned int buf[9];

	if ((((unsigned long) pc) & 3))
		return;

	if (copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for (i = 0; i < 9; i++)
		printk("%c%08x%c", i == 3 ? '<' : ' ', buf[i], i == 3 ? '>' : ' ');
	printk("\n");
}

void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long fp, thread_base, ksp;
	struct thread_info *tp;
	int count = 0;

	ksp = (unsigned long) _ksp;
	if (!tsk)
		tsk = current;
	tp = task_thread_info(tsk);
	if (ksp == 0UL) {
		if (tsk == current)
			asm("mov %%fp, %0" : "=r" (ksp));
		else
			ksp = tp->ksp;
	}
	if (tp == current_thread_info())
		flushw_all();

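	/* The sparc64 ABI keeps %sp offset by a constant bias
	 * (STACK_BIAS, 2047 bytes), so the raw stack pointer must have
	 * the bias added back before it can be dereferenced as a
	 * stack frame.
	 */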
	fp = ksp + STACK_BIAS;
	thread_base = (unsigned long) tp;

	printk("Call Trace:\n");
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		printk(" [%016lx] %pS\n", pc, (void *) pc);
	} while (++count < 16);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);

static inline int is_kernel_stack(struct task_struct *task,
				  struct reg_window *rw)
{
	unsigned long rw_addr = (unsigned long) rw;
	unsigned long thread_base, thread_end;

	if (rw_addr < PAGE_OFFSET) {
		if (task != &init_task)
			return 0;
	}

	thread_base = (unsigned long) task_stack_page(task);
	thread_end = thread_base + sizeof(union thread_union);
	if (rw_addr >= thread_base &&
	    rw_addr < thread_end &&
	    !(rw_addr & 0x7UL))
		return 1;

	return 0;
}

static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
{
	unsigned long fp = rw->ins[6];

	if (!fp)
		return NULL;

	return (struct reg_window *) (fp + STACK_BIAS);
}

void die_if_kernel(char *str, struct pt_regs *regs)
{
	static int die_counter;
	int count = 0;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
	__asm__ __volatile__("flushw");
	show_regs(regs);
	add_taint(TAINT_DIE);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		while (rw &&
		       count++ < 30 &&
		       is_kernel_stack(current, rw)) {
			printk("Caller[%016lx]: %pS\n", rw->ins[7],
			       (void *) rw->ins[7]);

			rw = kernel_stack_up(rw);
		}
		instruction_dump((unsigned int *) regs->tpc);
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump((unsigned int __user *) regs->tpc);
	}
	if (regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);

#define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
#define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
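/* These masks pick out the op field (bits 31:30) and the op3 field
 * (bits 24:19) of an instruction word; op = 2, op3 = 0x36 is the
 * IMPDEP1 opcode space in which the VIS instructions are encoded.
 */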

extern int handle_popc(u32 insn, struct pt_regs *regs);
extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);

void do_illegal_instruction(struct pt_regs *regs)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	siginfo_t info;

	if (notify_die(DIE_TRAP, "illegal instruction", regs,
		       0, 0x10, SIGILL) == NOTIFY_STOP)
		return;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("Kernel illegal instruction", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
			if (handle_popc(insn, regs))
				return;
		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
			if (handle_ldf_stq(insn, regs))
				return;
		} else if (tlb_type == hypervisor) {
			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
				if (!vis_emul(regs, insn))
					return;
			} else {
				struct fpustate *f = FPUSTATE;

				/* XXX maybe verify XFSR bits like
				 * XXX do_fpother() does?
				 */
				if (do_mathemu(regs, f))
					return;
			}
		}
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *)pc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);

void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
		return;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

void do_privop(struct pt_regs *regs)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "privileged operation", regs,
		       0, 0x11, SIGILL) == NOTIFY_STOP)
		return;

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGILL, &info, current);
}

void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}

/* Trap level 1 stuff or other traps we should never see... */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}

void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}

void do_getpsr(struct pt_regs *regs)
{
	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
	regs->tpc   = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

struct trap_per_cpu trap_block[NR_CPUS];

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
	int cpu = hard_smp_processor_id();
	struct trap_per_cpu *p = &trap_block[cpu];

	p->thread = t;
	p->pgd_paddr = 0;
}

extern void thread_info_offsets_are_bolixed_dave(void);
extern void trap_per_cpu_offsets_are_bolixed_dave(void);
extern void tsb_config_offsets_are_bolixed_dave(void);

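/* The *_bolixed_dave() functions above are deliberately left
 * undefined.  The sanity checks in trap_init() compare compile-time
 * constants, so when all of the assembler offsets match the compiler
 * discards the calls entirely; a mismatch leaves an unresolved
 * reference behind and the build fails at link time.
 */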
/* Only invoked on boot processor. */
void __init trap_init(void)
{
	/* Compile time sanity check. */
	if (TI_TASK != offsetof(struct thread_info, task) ||
	    TI_FLAGS != offsetof(struct thread_info, flags) ||
	    TI_CPU != offsetof(struct thread_info, cpu) ||
	    TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
	    TI_KSP != offsetof(struct thread_info, ksp) ||
	    TI_FAULT_ADDR != offsetof(struct thread_info, fault_address) ||
	    TI_KREGS != offsetof(struct thread_info, kregs) ||
	    TI_UTRAPS != offsetof(struct thread_info, utraps) ||
	    TI_EXEC_DOMAIN != offsetof(struct thread_info, exec_domain) ||
	    TI_REG_WINDOW != offsetof(struct thread_info, reg_window) ||
	    TI_RWIN_SPTRS != offsetof(struct thread_info, rwbuf_stkptrs) ||
	    TI_GSR != offsetof(struct thread_info, gsr) ||
	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
	    TI_USER_CNTD0 != offsetof(struct thread_info, user_cntd0) ||
	    TI_USER_CNTD1 != offsetof(struct thread_info, user_cntd1) ||
	    TI_KERN_CNTD0 != offsetof(struct thread_info, kernel_cntd0) ||
	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
	    TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) ||
	    TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) ||
	    TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) ||
	    TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) ||
	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
	    (TI_FPREGS & (64 - 1)))
		thread_info_offsets_are_bolixed_dave();

	if (TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu, thread) ||
	    (TRAP_PER_CPU_PGD_PADDR !=
	     offsetof(struct trap_per_cpu, pgd_paddr)) ||
	    (TRAP_PER_CPU_CPU_MONDO_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
	    (TRAP_PER_CPU_DEV_MONDO_PA !=
	     offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
	    (TRAP_PER_CPU_RESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
	     offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
	    (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
	     offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
	    (TRAP_PER_CPU_FAULT_INFO !=
	     offsetof(struct trap_per_cpu, fault_info)) ||
	    (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
	     offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
	    (TRAP_PER_CPU_CPU_LIST_PA !=
	     offsetof(struct trap_per_cpu, cpu_list_pa)) ||
	    (TRAP_PER_CPU_TSB_HUGE !=
	     offsetof(struct trap_per_cpu, tsb_huge)) ||
	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
	    (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
	     offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
	    (TRAP_PER_CPU_CPU_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
	    (TRAP_PER_CPU_DEV_MONDO_QMASK !=
	     offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
	    (TRAP_PER_CPU_RESUM_QMASK !=
	     offsetof(struct trap_per_cpu, resum_qmask)) ||
	    (TRAP_PER_CPU_NONRESUM_QMASK !=
	     offsetof(struct trap_per_cpu, nonresum_qmask)))
		trap_per_cpu_offsets_are_bolixed_dave();

	if ((TSB_CONFIG_TSB !=
	     offsetof(struct tsb_config, tsb)) ||
	    (TSB_CONFIG_RSS_LIMIT !=
	     offsetof(struct tsb_config, tsb_rss_limit)) ||
	    (TSB_CONFIG_NENTRIES !=
	     offsetof(struct tsb_config, tsb_nentries)) ||
	    (TSB_CONFIG_REG_VAL !=
	     offsetof(struct tsb_config, tsb_reg_val)) ||
	    (TSB_CONFIG_MAP_VADDR !=
	     offsetof(struct tsb_config, tsb_map_vaddr)) ||
	    (TSB_CONFIG_MAP_PTE !=
	     offsetof(struct tsb_config, tsb_map_pte)))
		tsb_config_offsets_are_bolixed_dave();

	/* Attach to the address space of init_task.  On SMP we
	 * do this in smp.c:smp_callin for other cpus.
	 */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
}