/* arch/sparc64/kernel/traps.c
 *
 * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
 */

/*
 * I like traps on v9, :))))
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
#include <linux/reboot.h>
#include <linux/gfp.h>
#include <linux/context_tracking.h>

#include <asm/smp.h>
#include <asm/delay.h>
#include <asm/ptrace.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/lsu.h>
#include <asm/dcu.h>
#include <asm/estate.h>
#include <asm/chafsr.h>
#include <asm/sfafsr.h>
#include <asm/psrcompat.h>
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/head.h>
#include <asm/prom.h>
#include <asm/memctrl.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>

#include "entry.h"
#include "kernel.h"
#include "kstack.h"

/* When an irrecoverable trap occurs at tl > 0, the trap entry
 * code logs the trap state registers at every level in the trap
 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
 * is as follows:
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;
		unsigned long tpc;
		unsigned long tnpc;
		unsigned long tt;
	} trapstack[4];
	unsigned long tl;
};

static void dump_tl1_traplog(struct tl1_traplog *p)
{
	int i, limit;

	printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
	       "dumping trap stack.\n", p->tl);

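	/* Sun4v guests run at most two trap levels deep (the hypervisor
	 * reserves the levels above that), so only two traplog entries
	 * are meaningful there versus four on sun4u.
	 */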
	limit = (tlb_type == hypervisor) ? 2 : 4;
	for (i = 0; i < limit; i++) {
		printk(KERN_EMERG
		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
		       "TNPC[%016lx] TT[%lx]\n",
		       i + 1,
		       p->trapstack[i].tstate, p->trapstack[i].tpc,
		       p->trapstack[i].tnpc, p->trapstack[i].tt);
		printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
	}
}

void bad_trap(struct pt_regs *regs, long lvl)
{
	char buffer[36];
	siginfo_t info;

	if (notify_die(DIE_TRAP, "bad trap", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	if (lvl < 0x100) {
		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
		die_if_kernel(buffer, regs);
	}

	lvl -= 0x100;
	if (regs->tstate & TSTATE_PRIV) {
		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
		die_if_kernel(buffer, regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLTRP;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = lvl;
	force_sig_info(SIGILL, &info, current);
}

void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[36];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf(buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel(buffer, regs);
}

#ifdef CONFIG_DEBUG_BUGVERBOSE
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif

static DEFINE_SPINLOCK(dimm_handler_lock);
static dimm_printer_t dimm_handler;

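/* A memory controller driver can register a callback here that turns
 * an ECC syndrome code plus physical address into a printable DIMM
 * label; with nothing registered, sprintf_dimm() falls back to the
 * OBP get-unumber service on spitfire.
 */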
static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
{
	unsigned long flags;
	int ret = -ENODEV;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (dimm_handler) {
		ret = dimm_handler(synd_code, paddr, buf, buflen);
	} else if (tlb_type == spitfire) {
		if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
			ret = -EINVAL;
		else
			ret = 0;
	} else
		ret = -ENODEV;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);

	return ret;
}

int register_dimm_printer(dimm_printer_t func)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (!dimm_handler)
		dimm_handler = func;
	else
		ret = -EEXIST;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(register_dimm_printer);

void unregister_dimm_printer(dimm_printer_t func)
{
	unsigned long flags;

	spin_lock_irqsave(&dimm_handler_lock, flags);
	if (dimm_handler == func)
		dimm_handler = NULL;
	spin_unlock_irqrestore(&dimm_handler_lock, flags);
}
EXPORT_SYMBOL_GPL(unregister_dimm_printer);

void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
out:
	exception_exit(prev_state);
}

void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}

void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}

void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		goto out;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			goto out;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
out:
	exception_exit(prev_state);
}

void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}

void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}

void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}

#ifdef CONFIG_PCI
#include "pci_impl.h"
#endif

/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	for (va = 0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}

static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}

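/* Map a raw 8-bit UDB ECC syndrome onto the code that sprintf_dimm()
 * and the OBP get-unumber service expect when naming the failing
 * memory module.
 */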
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};

static char *syndrome_unknown = "<Unknown>";

static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
{
	unsigned short scode;
	char memmod_str[64], *p;

	if (udbl & bit) {
		scode = ecc_syndrome_table[udbl & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}

	if (udbh & bit) {
		scode = ecc_syndrome_table[udbh & 0xff];
		if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
			p = syndrome_unknown;
		else
			p = memmod_str;
		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
		       "Memory Module \"%s\"\n",
		       smp_processor_id(), scode, p);
	}
}

static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}

static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
{
	siginfo_t info;

	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);

	/* XXX add more human friendly logging of the error status
	 * XXX as is implemented for cheetah
	 */

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
		   0, tt, SIGTRAP);

	if (regs->tstate & TSTATE_PRIV) {
		if (tl1)
			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
		die_if_kernel("UE", regs);
	}

	/* XXX need more intelligent processing here, such as is implemented
	 * XXX for cheetah errors, in fact if the E-cache still holds the
	 * XXX line with bad parity this will loop
	 */

	spitfire_clean_and_reenable_l1_caches();
	spitfire_enable_estate_errors();

	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_OBJERR;
	info.si_addr = (void *)0;
	info.si_trapno = 0;
	force_sig_info(SIGBUS, &info, current);
}

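/* The assembly trap handler packs the AFSR contents, the trap type, a
 * TL>1 flag and snapshots of both UDB error registers into a single
 * status word; the SFSTAT_* masks from asm/sfafsr.h unpack it below.
 */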
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}

int cheetah_pcache_forced_on;

void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}

/* Cheetah error trap handling. */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;

/* This table is ordered in priority of errors and matches the
 * AFAR overwrite policy as well.
 */

struct afsr_error_table {
	unsigned long mask;
	const char *name;
};

static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

struct cheetah_err_info *cheetah_error_log;

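/* Two scoreboard slots are kept per cpu: the first logs traps taken
 * at TL0, the second logs traps taken with CHAFSR_TL1 set (trap level
 * above zero).
 */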
static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
{
	struct cheetah_err_info *p;
	int cpu = smp_processor_id();

	if (!cheetah_error_log)
		return NULL;

	p = cheetah_error_log + (cpu * 2);
	if ((afsr & CHAFSR_TL1) != 0UL)
		p++;

	return p;
}

extern unsigned int tl0_icpe[], tl1_icpe[];
extern unsigned int tl0_dcpe[], tl1_dcpe[];
extern unsigned int tl0_fecc[], tl1_fecc[];
extern unsigned int tl0_cee[], tl1_cee[];
extern unsigned int tl0_iae[], tl1_iae[];
extern unsigned int tl0_dae[], tl1_dae[];
extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];

void __init cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	int i, sz;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long val;

		val = cpu_data(i).ecache_size;
		if (!val)
			continue;

		if (val > largest_size)
			largest_size = val;

		val = cpu_data(i).ecache_line_size;
		if (val < smallest_linesize)
			smallest_linesize = val;
	}

	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

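	/* The flush area is sized at twice the largest E-cache so that
	 * cheetah_flush_ecache_line() can always form two distinct
	 * aliases for a given line.
	 */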
	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

	if (ecache_flush_physbase == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
			    "contiguous physical memory.\n",
			    ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= sz)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", sz);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new information there.
	 */
	for (i = 0; i < 2 * NR_CPUS; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == __JALAPENO_ID ||
	    (ver >> 32) == __SERRANO_ID) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables. */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	flushi(PAGE_OFFSET);
}

static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}

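/* Displacement-flush the E-cache line holding PHYSADDR.  The flush
 * region is twice the E-cache size, so touching the matching offset
 * in each half evicts whatever line maps to that index (this relies
 * on the E-cache being direct-mapped on these cpus).
 */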
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}

/* Unfortunately, the diagnostic access to the I-cache tags we need to
 * use to clear the thing interferes with I-cache coherency transactions.
 *
 * So we must only flush the I-cache when it is disabled.
 */
static void __cheetah_flush_icache(void)
{
	unsigned int icache_size, icache_line_size;
	unsigned long addr;

	icache_size = local_cpu_data().icache_size;
	icache_line_size = local_cpu_data().icache_line_size;

	/* Clear the valid bits in all the tags. */
	for (addr = 0; addr < icache_size; addr += icache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr | (2 << 3)),
				       "i" (ASI_IC_TAG));
	}
}

static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}

static void cheetah_flush_dcache(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (addr), "i" (ASI_DCACHE_TAG));
	}
}

/* In order to make the even parity correct we must do two things.
 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
 * Next, we clear out all 32 bytes of data for that line.  Data of
 * all-zero + tag parity value of zero == correct parity.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned int dcache_size, dcache_line_size;
	unsigned long addr;

	dcache_size = local_cpu_data().dcache_size;
	dcache_line_size = local_cpu_data().dcache_line_size;

	for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
		unsigned long tag = (addr >> 14);
		unsigned long line;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (addr),
				       "i" (ASI_DCACHE_UTAG));
		for (line = addr; line < addr + dcache_line_size; line += 8)
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (line),
					       "i" (ASI_DCACHE_DATA));
	}
}

/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};

/* Return the highest priority error condition mentioned. */
static inline unsigned long cheetah_get_hipri(unsigned long afsr)
{
	unsigned long tmp = 0;
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
			return tmp;
	}
	return tmp;
}

static const char *cheetah_get_string(unsigned long bit)
{
	int i;

	for (i = 0; cheetah_error_table[i].mask; i++) {
		if ((bit & cheetah_error_table[i].mask) != 0UL)
			return cheetah_error_table[i].name;
	}
	return "???";
}

static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
	printk("%s" "ERROR(%d): ",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
	printk("TPC<%pS>\n", (void *) regs->tpc);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret >= 0)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
		if (ret >= 0)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
	       "u[%016llx] l[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}

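/* Re-read the AFSR and, if any error bits we care about are set,
 * record the AFSR/AFAR pair in *logp (when one is given) and return 1.
 * Writing the accrued bits back to the AFSR clears them.
 */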
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}

void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}

/* Try to fix a correctable error by pushing the line out from
 * the E-cache.  Recheck error reporting registers to see if the
 * problem is intermittent.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}

/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
	unsigned long vaddr = PAGE_OFFSET + paddr;

	if (vaddr > (unsigned long) high_memory)
		return 0;

	return kern_addr_valid(vaddr);
}

void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}

void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
1665 		unsigned long new_afsr = local_snapshot.afsr;
1666 
1667 		/* If we got a new asynchronous error, die... */
1668 		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
1669 				CHAFSR_WDU | CHAFSR_CPU |
1670 				CHAFSR_IVU | CHAFSR_UE |
1671 				CHAFSR_BERR | CHAFSR_TO))
1672 			recoverable = 0;
1673 	}
1674 
1675 	/* Log errors. */
1676 	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);
1677 
1678 	/* "Recoverable" here means we try to yank the page from ever
1679 	 * being used again.  This depends upon a few things:
1680 	 * 1) Must be main memory, and AFAR must be valid.
1681 	 * 2) If we trapped from user, OK.
1682 	 * 3) Else, if we trapped from kernel we must find an exception
1683 	 *    table entry (i.e. we have to have been accessing user
1684 	 *    space).
1685 	 *
1686 	 * If AFAR is not in main memory, or we trapped from kernel
1687 	 * and cannot find an exception table entry, it is unacceptable
1688 	 * to try to continue.
1689 	 */
1690 	if (recoverable && is_memory) {
1691 		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
1692 			/* OK, usermode access. */
1693 			recoverable = 1;
1694 		} else {
1695 			const struct exception_table_entry *entry;
1696 
1697 			entry = search_exception_tables(regs->tpc);
1698 			if (entry) {
1699 				/* OK, kernel access to userspace. */
1700 				recoverable = 1;
1701 
1702 			} else {
1703 				/* BAD, privileged state is corrupted. */
1704 				recoverable = 0;
1705 			}
1706 
1707 			if (recoverable) {
1708 				if (pfn_valid(afar >> PAGE_SHIFT))
1709 					get_page(pfn_to_page(afar >> PAGE_SHIFT));
1710 				else
1711 					recoverable = 0;
1712 
1713 				/* Only perform fixup if we still have a
1714 				 * recoverable condition.
1715 				 */
1716 				if (recoverable) {
1717 					regs->tpc = entry->fixup;
1718 					regs->tnpc = regs->tpc + 4;
1719 				}
1720 			}
1721 		}
1722 	} else {
1723 		recoverable = 0;
1724 	}
1725 
1726 	if (!recoverable)
1727 		panic("Irrecoverable deferred error trap.\n");
1728 }
1729 
1730 /* Handle a D/I cache parity error trap.  TYPE is encoded as:
1731  *
1732  * Bit0:	0=dcache,1=icache
1733  * Bit1:	0=recoverable,1=unrecoverable
1734  *
1735  * The hardware has disabled both the I-cache and D-cache in
1736  * the %dcr register.
1737  */
1738 void cheetah_plus_parity_error(int type, struct pt_regs *regs)
1739 {
1740 	if (type & 0x1)
1741 		__cheetah_flush_icache();
1742 	else
1743 		cheetah_plus_zap_dcache_parity();
1744 	cheetah_flush_dcache();
1745 
1746 	/* Re-enable I-cache/D-cache */
1747 	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
1748 			     "or %%g1, %1, %%g1\n\t"
1749 			     "stxa %%g1, [%%g0] %0\n\t"
1750 			     "membar #Sync"
1751 			     : /* no outputs */
1752 			     : "i" (ASI_DCU_CONTROL_REG),
1753 			       "i" (DCU_DC | DCU_IC)
1754 			     : "g1");
1755 
1756 	if (type & 0x2) {
1757 		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1758 		       smp_processor_id(),
1759 		       (type & 0x1) ? 'I' : 'D',
1760 		       regs->tpc);
1761 		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
1762 		panic("Irrecoverable Cheetah+ parity error.");
1763 	}
1764 
1765 	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
1766 	       smp_processor_id(),
1767 	       (type & 0x1) ? 'I' : 'D',
1768 	       regs->tpc);
1769 	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
1770 }
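
/* A compiled-out restatement of the TYPE encoding documented above
 * cheetah_plus_parity_error() (hypothetical helper names):
 */
#if 0
static inline int parity_error_is_icache(int type) { return type & 0x1; }
static inline int parity_error_is_fatal(int type)  { return type & 0x2; }
#endif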
1771 
1772 struct sun4v_error_entry {
1773 	/* Unique error handle */
1774 /*0x00*/u64		err_handle;
1775 
1776 	/* %stick value at the time of the error */
1777 /*0x08*/u64		err_stick;
1778 
1779 /*0x10*/u8		reserved_1[3];
1780 
1781 	/* Error type */
1782 /*0x13*/u8		err_type;
1783 #define SUN4V_ERR_TYPE_UNDEFINED	0
1784 #define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
1785 #define SUN4V_ERR_TYPE_PRECISE_NONRES	2
1786 #define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
1787 #define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
1788 #define SUN4V_ERR_TYPE_DUMP_CORE	5
1789 #define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
1790 #define SUN4V_ERR_TYPE_NUM		7
1791 
1792 	/* Error attributes */
1793 /*0x14*/u32		err_attrs;
1794 #define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
1795 #define SUN4V_ERR_ATTRS_MEMORY		0x00000002
1796 #define SUN4V_ERR_ATTRS_PIO		0x00000004
1797 #define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
1798 #define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
1799 #define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
1800 #define SUN4V_ERR_ATTRS_ASR		0x00000040
1801 #define SUN4V_ERR_ATTRS_ASI		0x00000080
1802 #define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
1803 #define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
1804 #define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
1805 #define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
1806 #define SUN4V_ERR_ATTRS_MODE_SHFT	24
1807 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000
1808 
1809 #define SUN4V_ERR_SPSTATE_FAULTED	0
1810 #define SUN4V_ERR_SPSTATE_AVAILABLE	1
1811 #define SUN4V_ERR_SPSTATE_NOT_PRESENT	2
1812 
1813 #define SUN4V_ERR_MODE_USER		1
1814 #define SUN4V_ERR_MODE_PRIV		2
1815 
1816 	/* Real address of the memory region or PIO transaction */
1817 /*0x18*/u64		err_raddr;
1818 
1819 	/* Size of the operation triggering the error, in bytes */
1820 /*0x20*/u32		err_size;
1821 
1822 	/* ID of the CPU */
1823 /*0x24*/u16		err_cpu;
1824 
1825 	/* Grace period for shutdown, in seconds */
1826 /*0x26*/u16		err_secs;
1827 
1828 	/* Value of the %asi register */
1829 /*0x28*/u8		err_asi;
1830 
1831 /*0x29*/u8		reserved_2;
1832 
1833 	/* Value of the ASR register number */
1834 /*0x2a*/u16		err_asr;
1835 #define SUN4V_ERR_ASR_VALID		0x8000
1836 
1837 /*0x2c*/u32		reserved_3;
1838 /*0x30*/u64		reserved_4;
1839 /*0x38*/u64		reserved_5;
1840 };
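
/* The 0xNN annotations above document the sun4v error report layout.  A
 * compiled-out sanity check in the spirit of the BUILD_BUG_ON()s in
 * trap_init() below (hypothetical; assumes the 0x40-byte total implied
 * by the reserved tail):
 */
#if 0
static void sun4v_error_entry_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_handle) != 0x00);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_stick) != 0x08);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_type) != 0x13);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_attrs) != 0x14);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_raddr) != 0x18);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_size) != 0x20);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_cpu) != 0x24);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_secs) != 0x26);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_asi) != 0x28);
	BUILD_BUG_ON(offsetof(struct sun4v_error_entry, err_asr) != 0x2a);
	BUILD_BUG_ON(sizeof(struct sun4v_error_entry) != 0x40);
}
#endif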
1841 
1842 static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
1843 static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1844 
1845 static const char *sun4v_err_type_to_str(u8 type)
1846 {
1847 	static const char *types[SUN4V_ERR_TYPE_NUM] = {
1848 		"undefined",
1849 		"uncorrected resumable",
1850 		"precise nonresumable",
1851 		"deferred nonresumable",
1852 		"shutdown request",
1853 		"dump core",
1854 		"SP state change",
1855 	};
1856 
1857 	if (type < SUN4V_ERR_TYPE_NUM)
1858 		return types[type];
1859 
1860 	return "unknown";
1861 }
1862 
1863 static void sun4v_emit_err_attr_strings(u32 attrs)
1864 {
1865 	static const char *attr_names[] = {
1866 		"processor",
1867 		"memory",
1868 		"PIO",
1869 		"int-registers",
1870 		"fpu-registers",
1871 		"shutdown-request",
1872 		"ASR",
1873 		"ASI",
1874 		"priv-reg",
1875 	};
1876 	static const char *sp_states[] = {
1877 		"sp-faulted",
1878 		"sp-available",
1879 		"sp-not-present",
1880 		"sp-state-reserved",
1881 	};
1882 	static const char *modes[] = {
1883 		"mode-reserved0",
1884 		"user",
1885 		"priv",
1886 		"mode-reserved1",
1887 	};
1888 	u32 sp_state, mode;
1889 	int i;
1890 
1891 	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
1892 		if (attrs & (1U << i)) {
1893 			const char *s = attr_names[i];
1894 
1895 			pr_cont("%s ", s);
1896 		}
1897 	}
1898 
1899 	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
1900 		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
1901 	pr_cont("%s ", sp_states[sp_state]);
1902 
1903 	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
1904 		SUN4V_ERR_ATTRS_MODE_SHFT);
1905 	pr_cont("%s ", modes[mode]);
1906 
1907 	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
1908 		pr_cont("res-queue-full ");
1909 }
1910 
1911 /* When the report contains a real-address of "-1" it means that the
1912  * hardware did not provide the address.  So we compute the effective
1913  * address of the load or store instruction at regs->tpc and report
1914  * that.  Usually when this happens it's a PIO and in such a case we
1915  * are using physical addresses with bypass ASIs anyway, so what we
1916  * report here is exactly what we want.
1917  */
1918 static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
1919 {
1920 	unsigned int insn;
1921 	u64 addr;
1922 
1923 	if (!(regs->tstate & TSTATE_PRIV))
1924 		return;
1925 
1926 	insn = *(unsigned int *) regs->tpc;
1927 
1928 	addr = compute_effective_address(regs, insn, 0);
1929 
1930 	printk("%s: insn effective address [0x%016llx]\n",
1931 	       pfx, addr);
1932 }
1933 
1934 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
1935 			    int cpu, const char *pfx, atomic_t *ocnt)
1936 {
1937 	u64 *raw_ptr = (u64 *) ent;
1938 	u32 attrs;
1939 	int cnt;
1940 
1941 	printk("%s: Reporting on cpu %d\n", pfx, cpu);
1942 	printk("%s: TPC [0x%016lx] <%pS>\n",
1943 	       pfx, regs->tpc, (void *) regs->tpc);
1944 
1945 	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
1946 	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
1947 	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
1948 	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);
1949 
1950 	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
1951 	       pfx, ent->err_handle, ent->err_stick);
1952 
1953 	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));
1954 
1955 	attrs = ent->err_attrs;
1956 	printk("%s: attrs [0x%08x] < ", pfx, attrs);
1957 	sun4v_emit_err_attr_strings(attrs);
1958 	pr_cont(">\n");
1959 
1960 	/* Various fields in the error report are only valid if
1961 	 * certain attribute bits are set.
1962 	 */
1963 	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
1964 		     SUN4V_ERR_ATTRS_PIO |
1965 		     SUN4V_ERR_ATTRS_ASI)) {
1966 		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);
1967 
1968 		if (ent->err_raddr == ~(u64)0)
1969 			sun4v_report_real_raddr(pfx, regs);
1970 	}
1971 
1972 	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
1973 		printk("%s: size [0x%x]\n", pfx, ent->err_size);
1974 
1975 	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
1976 		     SUN4V_ERR_ATTRS_INT_REGISTERS |
1977 		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
1978 		     SUN4V_ERR_ATTRS_PRIV_REG))
1979 		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);
1980 
1981 	if (attrs & SUN4V_ERR_ATTRS_ASI)
1982 		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);
1983 
1984 	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
1985 		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
1986 		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
1987 	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
1988 		printk("%s: reg [0x%04x]\n",
1989 		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);
1990 
1991 	show_regs(regs);
1992 
1993 	if ((cnt = atomic_read(ocnt)) != 0) {
1994 		atomic_set(ocnt, 0);
1995 		wmb();
1996 		printk("%s: Queue overflowed %d times.\n",
1997 		       pfx, cnt);
1998 	}
1999 }
2000 
2001 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2002  * Log the event and clear the first word of the entry.
2003  */
2004 void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
2005 {
2006 	enum ctx_state prev_state = exception_enter();
2007 	struct sun4v_error_entry *ent, local_copy;
2008 	struct trap_per_cpu *tb;
2009 	unsigned long paddr;
2010 	int cpu;
2011 
2012 	cpu = get_cpu();
2013 
2014 	tb = &trap_block[cpu];
2015 	paddr = tb->resum_kernel_buf_pa + offset;
2016 	ent = __va(paddr);
2017 
2018 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2019 
2020 	/* We have a local copy now, so release the entry.  */
2021 	ent->err_handle = 0;
2022 	wmb();
2023 
2024 	put_cpu();
2025 
2026 	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
2027 		/* We should really take the seconds field of
2028 		 * the error report and use it for the shutdown
2029 		 * invocation, but for now do the same thing we
2030 		 * do for a DS shutdown request.
2031 		 */
2032 		pr_info("Shutdown request, %u seconds...\n",
2033 			local_copy.err_secs);
2034 		orderly_poweroff(true);
2035 		goto out;
2036 	}
2037 
2038 	sun4v_log_error(regs, &local_copy, cpu,
2039 			KERN_ERR "RESUMABLE ERROR",
2040 			&sun4v_resum_oflow_cnt);
2041 out:
2042 	exception_exit(prev_state);
2043 }
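
/* Both sun4v error paths release their queue entry the same way: snapshot
 * the record, clear err_handle, then wmb() so the clear is ordered before
 * the slot can be reused.  A compiled-out restatement of that protocol
 * (assuming err_handle == 0 marks the entry free):
 */
#if 0
memcpy(&local_copy, ent, sizeof(local_copy));	/* snapshot first */
ent->err_handle = 0;				/* then release the slot */
wmb();						/* order release vs. reuse */
#endif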
2044 
2045 /* If we try to printk() we'll probably make matters worse, by trying
2046  * to retake locks this cpu already holds or causing more errors. So
2047  * just bump a counter, and we'll report these counter bumps above.
2048  */
2049 void sun4v_resum_overflow(struct pt_regs *regs)
2050 {
2051 	atomic_inc(&sun4v_resum_oflow_cnt);
2052 }
2053 
2054 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
2055  * Log the event, clear the first word of the entry, and die.
2056  */
2057 void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
2058 {
2059 	struct sun4v_error_entry *ent, local_copy;
2060 	struct trap_per_cpu *tb;
2061 	unsigned long paddr;
2062 	int cpu;
2063 
2064 	cpu = get_cpu();
2065 
2066 	tb = &trap_block[cpu];
2067 	paddr = tb->nonresum_kernel_buf_pa + offset;
2068 	ent = __va(paddr);
2069 
2070 	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));
2071 
2072 	/* We have a local copy now, so release the entry.  */
2073 	ent->err_handle = 0;
2074 	wmb();
2075 
2076 	put_cpu();
2077 
2078 #ifdef CONFIG_PCI
2079 	/* Check for the special PCI poke sequence. */
2080 	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
2081 		pci_poke_faulted = 1;
2082 		regs->tpc += 4;
2083 		regs->tnpc = regs->tpc + 4;
2084 		return;
2085 	}
2086 #endif
2087 
2088 	sun4v_log_error(regs, &local_copy, cpu,
2089 			KERN_EMERG "NON-RESUMABLE ERROR",
2090 			&sun4v_nonresum_oflow_cnt);
2091 
2092 	panic("Non-resumable error.");
2093 }
2094 
2095 /* If we try to printk() we'll probably make matters worse, by trying
2096  * to retake locks this cpu already holds or causing more errors. So
2097  * just bump a counter, and we'll report these counter bumps above.
2098  */
2099 void sun4v_nonresum_overflow(struct pt_regs *regs)
2100 {
2101 	/* XXX Actually, even this may not make much sense.  Perhaps
2102 	 * XXX we should just pull the plug and panic directly from here?
2103 	 */
2104 	atomic_inc(&sun4v_nonresum_oflow_cnt);
2105 }
2106 
2107 static void sun4v_tlb_error(struct pt_regs *regs)
2108 {
2109 	die_if_kernel("TLB/TSB error", regs);
2110 }
2111 
2112 unsigned long sun4v_err_itlb_vaddr;
2113 unsigned long sun4v_err_itlb_ctx;
2114 unsigned long sun4v_err_itlb_pte;
2115 unsigned long sun4v_err_itlb_error;
2116 
2117 void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
2118 {
2119 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2120 
2121 	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
2122 	       regs->tpc, tl);
2123 	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
2124 	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
2125 	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
2126 	       (void *) regs->u_regs[UREG_I7]);
2127 	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
2128 	       "pte[%lx] error[%lx]\n",
2129 	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
2130 	       sun4v_err_itlb_pte, sun4v_err_itlb_error);
2131 
2132 	sun4v_tlb_error(regs);
2133 }
2134 
2135 unsigned long sun4v_err_dtlb_vaddr;
2136 unsigned long sun4v_err_dtlb_ctx;
2137 unsigned long sun4v_err_dtlb_pte;
2138 unsigned long sun4v_err_dtlb_error;
2139 
2140 void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
2141 {
2142 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2143 
2144 	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
2145 	       regs->tpc, tl);
2146 	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
2147 	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
2148 	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
2149 	       (void *) regs->u_regs[UREG_I7]);
2150 	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
2151 	       "pte[%lx] error[%lx]\n",
2152 	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
2153 	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);
2154 
2155 	sun4v_tlb_error(regs);
2156 }
2157 
2158 void hypervisor_tlbop_error(unsigned long err, unsigned long op)
2159 {
2160 	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
2161 	       err, op);
2162 }
2163 
2164 void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
2165 {
2166 	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
2167 	       err, op);
2168 }
2169 
2170 static void do_fpe_common(struct pt_regs *regs)
2171 {
2172 	if (regs->tstate & TSTATE_PRIV) {
2173 		regs->tpc = regs->tnpc;
2174 		regs->tnpc += 4;
2175 	} else {
2176 		unsigned long fsr = current_thread_info()->xfsr[0];
2177 		siginfo_t info;
2178 
2179 		if (test_thread_flag(TIF_32BIT)) {
2180 			regs->tpc &= 0xffffffff;
2181 			regs->tnpc &= 0xffffffff;
2182 		}
2183 		info.si_signo = SIGFPE;
2184 		info.si_errno = 0;
2185 		info.si_addr = (void __user *)regs->tpc;
2186 		info.si_trapno = 0;
2187 		info.si_code = __SI_FAULT;
2188 		if ((fsr & 0x1c000) == (1 << 14)) {
2189 			if (fsr & 0x10)
2190 				info.si_code = FPE_FLTINV;
2191 			else if (fsr & 0x08)
2192 				info.si_code = FPE_FLTOVF;
2193 			else if (fsr & 0x04)
2194 				info.si_code = FPE_FLTUND;
2195 			else if (fsr & 0x02)
2196 				info.si_code = FPE_FLTDIV;
2197 			else if (fsr & 0x01)
2198 				info.si_code = FPE_FLTRES;
2199 		}
2200 		force_sig_info(SIGFPE, &info, current);
2201 	}
2202 }
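
/* The %fsr decode above: ftt (bits 16:14) == 1 means IEEE_754_exception,
 * and cexc (bits 4:0) says which exception fired.  A compiled-out
 * restatement (hypothetical helper name):
 */
#if 0
static int fsr_to_si_code(unsigned long fsr)
{
	if ((fsr & 0x1c000) != (1 << 14))	/* ftt != IEEE_754_exception */
		return __SI_FAULT;
	if (fsr & 0x10) return FPE_FLTINV;	/* nv: invalid operation */
	if (fsr & 0x08) return FPE_FLTOVF;	/* of: overflow */
	if (fsr & 0x04) return FPE_FLTUND;	/* uf: underflow */
	if (fsr & 0x02) return FPE_FLTDIV;	/* dz: division by zero */
	if (fsr & 0x01) return FPE_FLTRES;	/* nx: inexact result */
	return __SI_FAULT;
}
#endif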
2203 
2204 void do_fpieee(struct pt_regs *regs)
2205 {
2206 	enum ctx_state prev_state = exception_enter();
2207 
2208 	if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2209 		       0, 0x24, SIGFPE) == NOTIFY_STOP)
2210 		goto out;
2211 
2212 	do_fpe_common(regs);
2213 out:
2214 	exception_exit(prev_state);
2215 }
2216 
2217 void do_fpother(struct pt_regs *regs)
2218 {
2219 	enum ctx_state prev_state = exception_enter();
2220 	struct fpustate *f = FPUSTATE;
2221 	int ret = 0;
2222 
2223 	if (notify_die(DIE_TRAP, "fpu exception other", regs,
2224 		       0, 0x25, SIGFPE) == NOTIFY_STOP)
2225 		goto out;
2226 
2227 	switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2228 	case (2 << 14): /* unfinished_FPop */
2229 	case (3 << 14): /* unimplemented_FPop */
2230 		ret = do_mathemu(regs, f, false);
2231 		break;
2232 	}
2233 	if (ret)
2234 		goto out;
2235 	do_fpe_common(regs);
2236 out:
2237 	exception_exit(prev_state);
2238 }
2239 
2240 void do_tof(struct pt_regs *regs)
2241 {
2242 	enum ctx_state prev_state = exception_enter();
2243 	siginfo_t info;
2244 
2245 	if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2246 		       0, 0x26, SIGEMT) == NOTIFY_STOP)
2247 		goto out;
2248 
2249 	if (regs->tstate & TSTATE_PRIV)
2250 		die_if_kernel("Penguin overflow trap from kernel mode", regs);
2251 	if (test_thread_flag(TIF_32BIT)) {
2252 		regs->tpc &= 0xffffffff;
2253 		regs->tnpc &= 0xffffffff;
2254 	}
2255 	info.si_signo = SIGEMT;
2256 	info.si_errno = 0;
2257 	info.si_code = EMT_TAGOVF;
2258 	info.si_addr = (void __user *)regs->tpc;
2259 	info.si_trapno = 0;
2260 	force_sig_info(SIGEMT, &info, current);
2261 out:
2262 	exception_exit(prev_state);
2263 }
2264 
2265 void do_div0(struct pt_regs *regs)
2266 {
2267 	enum ctx_state prev_state = exception_enter();
2268 	siginfo_t info;
2269 
2270 	if (notify_die(DIE_TRAP, "integer division by zero", regs,
2271 		       0, 0x28, SIGFPE) == NOTIFY_STOP)
2272 		goto out;
2273 
2274 	if (regs->tstate & TSTATE_PRIV)
2275 		die_if_kernel("TL0: Kernel divide by zero.", regs);
2276 	if (test_thread_flag(TIF_32BIT)) {
2277 		regs->tpc &= 0xffffffff;
2278 		regs->tnpc &= 0xffffffff;
2279 	}
2280 	info.si_signo = SIGFPE;
2281 	info.si_errno = 0;
2282 	info.si_code = FPE_INTDIV;
2283 	info.si_addr = (void __user *)regs->tpc;
2284 	info.si_trapno = 0;
2285 	force_sig_info(SIGFPE, &info, current);
2286 out:
2287 	exception_exit(prev_state);
2288 }
2289 
2290 static void instruction_dump(unsigned int *pc)
2291 {
2292 	int i;
2293 
2294 	if ((((unsigned long) pc) & 3))
2295 		return;
2296 
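	/* Dump pc[-3] .. pc[5]; the faulting word, pc[0], is bracketed <...>. */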
2297 	printk("Instruction DUMP:");
2298 	for (i = -3; i < 6; i++)
2299 		printk("%c%08x%c",i?' ':'<',pc[i],i?' ':'>');
2300 	printk("\n");
2301 }
2302 
2303 static void user_instruction_dump(unsigned int __user *pc)
2304 {
2305 	int i;
2306 	unsigned int buf[9];
2307 
2308 	if ((((unsigned long) pc) & 3))
2309 		return;
2310 
2311 	if (copy_from_user(buf, pc - 3, sizeof(buf)))
2312 		return;
2313 
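	/* buf[] holds pc[-3] .. pc[5]; the faulting word is buf[3]. */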
2314 	printk("Instruction DUMP:");
2315 	for (i = 0; i < 9; i++)
2316 		printk("%c%08x%c",i==3?'<':' ',buf[i],i==3?'>':' ');
2317 	printk("\n");
2318 }
2319 
2320 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
2321 {
2322 	unsigned long fp, ksp;
2323 	struct thread_info *tp;
2324 	int count = 0;
2325 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2326 	int graph = 0;
2327 #endif
2328 
2329 	ksp = (unsigned long) _ksp;
2330 	if (!tsk)
2331 		tsk = current;
2332 	tp = task_thread_info(tsk);
2333 	if (ksp == 0UL) {
2334 		if (tsk == current)
2335 			asm("mov %%fp, %0" : "=r" (ksp));
2336 		else
2337 			ksp = tp->ksp;
2338 	}
2339 	if (tp == current_thread_info())
2340 		flushw_all();
2341 
2342 	fp = ksp + STACK_BIAS;
2343 
2344 	printk("Call Trace:\n");
2345 	do {
2346 		struct sparc_stackf *sf;
2347 		struct pt_regs *regs;
2348 		unsigned long pc;
2349 
2350 		if (!kstack_valid(tp, fp))
2351 			break;
2352 		sf = (struct sparc_stackf *) fp;
2353 		regs = (struct pt_regs *) (sf + 1);
2354 
2355 		if (kstack_is_trap_frame(tp, regs)) {
2356 			if (!(regs->tstate & TSTATE_PRIV))
2357 				break;
2358 			pc = regs->tpc;
2359 			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
2360 		} else {
2361 			pc = sf->callers_pc;
2362 			fp = (unsigned long)sf->fp + STACK_BIAS;
2363 		}
2364 
2365 		printk(" [%016lx] %pS\n", pc, (void *) pc);
2366 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2367 		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
2368 			int index = tsk->curr_ret_stack;
2369 			if (tsk->ret_stack && index >= graph) {
2370 				pc = tsk->ret_stack[index - graph].ret;
2371 				printk(" [%016lx] %pS\n", pc, (void *) pc);
2372 				graph++;
2373 			}
2374 		}
2375 #endif
2376 	} while (++count < 16);
2377 }
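
/* Usage sketch (hypothetical call site): a NULL stack pointer makes
 * show_stack() read %fp itself when dumping the current task. */
#if 0
show_stack(current, NULL);
#endif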
2378 
2379 static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2380 {
2381 	unsigned long fp = rw->ins[6];
2382 
2383 	if (!fp)
2384 		return NULL;
2385 
2386 	return (struct reg_window *) (fp + STACK_BIAS);
2387 }
2388 
2389 void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
2390 {
2391 	static int die_counter;
2392 	int count = 0;
2393 
2394 	/* Amuse the user. */
2395 	printk(
2396 "              \\|/ ____ \\|/\n"
2397 "              \"@'/ .. \\`@\"\n"
2398 "              /_| \\__/ |_\\\n"
2399 "                 \\__U_/\n");
2400 
2401 	printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2402 	notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2403 	__asm__ __volatile__("flushw");
2404 	show_regs(regs);
2405 	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
2406 	if (regs->tstate & TSTATE_PRIV) {
2407 		struct thread_info *tp = current_thread_info();
2408 		struct reg_window *rw = (struct reg_window *)
2409 			(regs->u_regs[UREG_FP] + STACK_BIAS);
2410 
2411 		/* Stop the back trace when we hit userland or we
2412 		 * find some badly aligned kernel stack.
2413 		 */
2414 		while (rw &&
2415 		       count++ < 30 &&
2416 		       kstack_valid(tp, (unsigned long) rw)) {
2417 			printk("Caller[%016lx]: %pS\n", rw->ins[7],
2418 			       (void *) rw->ins[7]);
2419 
2420 			rw = kernel_stack_up(rw);
2421 		}
2422 		instruction_dump ((unsigned int *) regs->tpc);
2423 	} else {
2424 		if (test_thread_flag(TIF_32BIT)) {
2425 			regs->tpc &= 0xffffffff;
2426 			regs->tnpc &= 0xffffffff;
2427 		}
2428 		user_instruction_dump ((unsigned int __user *) regs->tpc);
2429 	}
2430 	if (panic_on_oops)
2431 		panic("Fatal exception");
2432 	if (regs->tstate & TSTATE_PRIV)
2433 		do_exit(SIGKILL);
2434 	do_exit(SIGSEGV);
2435 }
2436 EXPORT_SYMBOL(die_if_kernel);
2437 
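/* These match op (bits 31:30) == 2 and op3 (bits 24:19) == 0x36, the
 * IMPDEP1 opcode space where the VIS instructions live.
 */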
2438 #define VIS_OPCODE_MASK	((0x3 << 30) | (0x3f << 19))
2439 #define VIS_OPCODE_VAL	((0x2 << 30) | (0x36 << 19))
2440 
2441 void do_illegal_instruction(struct pt_regs *regs)
2442 {
2443 	enum ctx_state prev_state = exception_enter();
2444 	unsigned long pc = regs->tpc;
2445 	unsigned long tstate = regs->tstate;
2446 	u32 insn;
2447 	siginfo_t info;
2448 
2449 	if (notify_die(DIE_TRAP, "illegal instruction", regs,
2450 		       0, 0x10, SIGILL) == NOTIFY_STOP)
2451 		goto out;
2452 
2453 	if (tstate & TSTATE_PRIV)
2454 		die_if_kernel("Kernel illegal instruction", regs);
2455 	if (test_thread_flag(TIF_32BIT))
2456 		pc = (u32)pc;
2457 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2458 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2459 			if (handle_popc(insn, regs))
2460 				goto out;
2461 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2462 			if (handle_ldf_stq(insn, regs))
2463 				goto out;
2464 		} else if (tlb_type == hypervisor) {
2465 			if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2466 				if (!vis_emul(regs, insn))
2467 					goto out;
2468 			} else {
2469 				struct fpustate *f = FPUSTATE;
2470 
2471 				/* On UltraSPARC T2 and later, FPU insns which
2472 				 * are not implemented in HW signal an illegal
2473 				 * instruction trap and do not set the FP Trap
2474 				 * Type in the %fsr to unimplemented_FPop.
2475 				 */
2476 				if (do_mathemu(regs, f, true))
2477 					goto out;
2478 			}
2479 		}
2480 	}
2481 	info.si_signo = SIGILL;
2482 	info.si_errno = 0;
2483 	info.si_code = ILL_ILLOPC;
2484 	info.si_addr = (void __user *)pc;
2485 	info.si_trapno = 0;
2486 	force_sig_info(SIGILL, &info, current);
2487 out:
2488 	exception_exit(prev_state);
2489 }
2490 
2491 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2492 {
2493 	enum ctx_state prev_state = exception_enter();
2494 	siginfo_t info;
2495 
2496 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2497 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2498 		goto out;
2499 
2500 	if (regs->tstate & TSTATE_PRIV) {
2501 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2502 		goto out;
2503 	}
2504 	info.si_signo = SIGBUS;
2505 	info.si_errno = 0;
2506 	info.si_code = BUS_ADRALN;
2507 	info.si_addr = (void __user *)sfar;
2508 	info.si_trapno = 0;
2509 	force_sig_info(SIGBUS, &info, current);
2510 out:
2511 	exception_exit(prev_state);
2512 }
2513 
2514 void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2515 {
2516 	siginfo_t info;
2517 
2518 	if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2519 		       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2520 		return;
2521 
2522 	if (regs->tstate & TSTATE_PRIV) {
2523 		kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2524 		return;
2525 	}
2526 	info.si_signo = SIGBUS;
2527 	info.si_errno = 0;
2528 	info.si_code = BUS_ADRALN;
2529 	info.si_addr = (void __user *) addr;
2530 	info.si_trapno = 0;
2531 	force_sig_info(SIGBUS, &info, current);
2532 }
2533 
2534 void do_privop(struct pt_regs *regs)
2535 {
2536 	enum ctx_state prev_state = exception_enter();
2537 	siginfo_t info;
2538 
2539 	if (notify_die(DIE_TRAP, "privileged operation", regs,
2540 		       0, 0x11, SIGILL) == NOTIFY_STOP)
2541 		goto out;
2542 
2543 	if (test_thread_flag(TIF_32BIT)) {
2544 		regs->tpc &= 0xffffffff;
2545 		regs->tnpc &= 0xffffffff;
2546 	}
2547 	info.si_signo = SIGILL;
2548 	info.si_errno = 0;
2549 	info.si_code = ILL_PRVOPC;
2550 	info.si_addr = (void __user *)regs->tpc;
2551 	info.si_trapno = 0;
2552 	force_sig_info(SIGILL, &info, current);
2553 out:
2554 	exception_exit(prev_state);
2555 }
2556 
2557 void do_privact(struct pt_regs *regs)
2558 {
2559 	do_privop(regs);
2560 }
2561 
2562 /* Trap level 1 stuff or other traps we should never see... */
2563 void do_cee(struct pt_regs *regs)
2564 {
2565 	exception_enter();
2566 	die_if_kernel("TL0: Cache Error Exception", regs);
2567 }
2568 
2569 void do_div0_tl1(struct pt_regs *regs)
2570 {
2571 	exception_enter();
2572 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2573 	die_if_kernel("TL1: DIV0 Exception", regs);
2574 }
2575 
2576 void do_fpieee_tl1(struct pt_regs *regs)
2577 {
2578 	exception_enter();
2579 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2580 	die_if_kernel("TL1: FPU IEEE Exception", regs);
2581 }
2582 
2583 void do_fpother_tl1(struct pt_regs *regs)
2584 {
2585 	exception_enter();
2586 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2587 	die_if_kernel("TL1: FPU Other Exception", regs);
2588 }
2589 
2590 void do_ill_tl1(struct pt_regs *regs)
2591 {
2592 	exception_enter();
2593 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2594 	die_if_kernel("TL1: Illegal Instruction Exception", regs);
2595 }
2596 
2597 void do_irq_tl1(struct pt_regs *regs)
2598 {
2599 	exception_enter();
2600 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2601 	die_if_kernel("TL1: IRQ Exception", regs);
2602 }
2603 
2604 void do_lddfmna_tl1(struct pt_regs *regs)
2605 {
2606 	exception_enter();
2607 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2608 	die_if_kernel("TL1: LDDF Exception", regs);
2609 }
2610 
2611 void do_stdfmna_tl1(struct pt_regs *regs)
2612 {
2613 	exception_enter();
2614 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2615 	die_if_kernel("TL1: STDF Exception", regs);
2616 }
2617 
2618 void do_paw(struct pt_regs *regs)
2619 {
2620 	exception_enter();
2621 	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
2622 }
2623 
2624 void do_paw_tl1(struct pt_regs *regs)
2625 {
2626 	exception_enter();
2627 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2628 	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
2629 }
2630 
2631 void do_vaw(struct pt_regs *regs)
2632 {
2633 	exception_enter();
2634 	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
2635 }
2636 
2637 void do_vaw_tl1(struct pt_regs *regs)
2638 {
2639 	exception_enter();
2640 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2641 	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
2642 }
2643 
2644 void do_tof_tl1(struct pt_regs *regs)
2645 {
2646 	exception_enter();
2647 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
2648 	die_if_kernel("TL1: Tag Overflow Exception", regs);
2649 }
2650 
2651 void do_getpsr(struct pt_regs *regs)
2652 {
2653 	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
2654 	regs->tpc   = regs->tnpc;
2655 	regs->tnpc += 4;
2656 	if (test_thread_flag(TIF_32BIT)) {
2657 		regs->tpc &= 0xffffffff;
2658 		regs->tnpc &= 0xffffffff;
2659 	}
2660 }
2661 
2662 u64 cpu_mondo_counter[NR_CPUS] = {0};
2663 struct trap_per_cpu trap_block[NR_CPUS];
2664 EXPORT_SYMBOL(trap_block);
2665 
2666 /* This can get invoked before sched_init() so play it super safe
2667  * and use hard_smp_processor_id().
2668  */
2669 void notrace init_cur_cpu_trap(struct thread_info *t)
2670 {
2671 	int cpu = hard_smp_processor_id();
2672 	struct trap_per_cpu *p = &trap_block[cpu];
2673 
2674 	p->thread = t;
2675 	p->pgd_paddr = 0;
2676 }
2677 
2678 extern void thread_info_offsets_are_bolixed_dave(void);
2679 extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2680 extern void tsb_config_offsets_are_bolixed_dave(void);
2681 
2682 /* Only invoked on boot processor. */
2683 void __init trap_init(void)
2684 {
2685 	/* Compile time sanity check. */
2686 	BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
2687 		     TI_FLAGS != offsetof(struct thread_info, flags) ||
2688 		     TI_CPU != offsetof(struct thread_info, cpu) ||
2689 		     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
2690 		     TI_KSP != offsetof(struct thread_info, ksp) ||
2691 		     TI_FAULT_ADDR != offsetof(struct thread_info,
2692 					       fault_address) ||
2693 		     TI_KREGS != offsetof(struct thread_info, kregs) ||
2694 		     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
2695 		     TI_REG_WINDOW != offsetof(struct thread_info,
2696 					       reg_window) ||
2697 		     TI_RWIN_SPTRS != offsetof(struct thread_info,
2698 					       rwbuf_stkptrs) ||
2699 		     TI_GSR != offsetof(struct thread_info, gsr) ||
2700 		     TI_XFSR != offsetof(struct thread_info, xfsr) ||
2701 		     TI_PRE_COUNT != offsetof(struct thread_info,
2702 					      preempt_count) ||
2703 		     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
2704 		     TI_CURRENT_DS != offsetof(struct thread_info,
2705 						current_ds) ||
2706 		     TI_KUNA_REGS != offsetof(struct thread_info,
2707 					      kern_una_regs) ||
2708 		     TI_KUNA_INSN != offsetof(struct thread_info,
2709 					      kern_una_insn) ||
2710 		     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
2711 		     (TI_FPREGS & (64 - 1)));
2712 
2713 	BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
2714 						     thread) ||
2715 		     (TRAP_PER_CPU_PGD_PADDR !=
2716 		      offsetof(struct trap_per_cpu, pgd_paddr)) ||
2717 		     (TRAP_PER_CPU_CPU_MONDO_PA !=
2718 		      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
2719 		     (TRAP_PER_CPU_DEV_MONDO_PA !=
2720 		      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
2721 		     (TRAP_PER_CPU_RESUM_MONDO_PA !=
2722 		      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
2723 		     (TRAP_PER_CPU_RESUM_KBUF_PA !=
2724 		      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
2725 		     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
2726 		      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
2727 		     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
2728 		      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
2729 		     (TRAP_PER_CPU_FAULT_INFO !=
2730 		      offsetof(struct trap_per_cpu, fault_info)) ||
2731 		     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
2732 		      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
2733 		     (TRAP_PER_CPU_CPU_LIST_PA !=
2734 		      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
2735 		     (TRAP_PER_CPU_TSB_HUGE !=
2736 		      offsetof(struct trap_per_cpu, tsb_huge)) ||
2737 		     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
2738 		      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
2739 		     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
2740 		      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
2741 		     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
2742 		      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
2743 		     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
2744 		      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
2745 		     (TRAP_PER_CPU_RESUM_QMASK !=
2746 		      offsetof(struct trap_per_cpu, resum_qmask)) ||
2747 		     (TRAP_PER_CPU_NONRESUM_QMASK !=
2748 		      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
2749 		     (TRAP_PER_CPU_PER_CPU_BASE !=
2750 		      offsetof(struct trap_per_cpu, __per_cpu_base)));
2751 
2752 	BUILD_BUG_ON((TSB_CONFIG_TSB !=
2753 		      offsetof(struct tsb_config, tsb)) ||
2754 		     (TSB_CONFIG_RSS_LIMIT !=
2755 		      offsetof(struct tsb_config, tsb_rss_limit)) ||
2756 		     (TSB_CONFIG_NENTRIES !=
2757 		      offsetof(struct tsb_config, tsb_nentries)) ||
2758 		     (TSB_CONFIG_REG_VAL !=
2759 		      offsetof(struct tsb_config, tsb_reg_val)) ||
2760 		     (TSB_CONFIG_MAP_VADDR !=
2761 		      offsetof(struct tsb_config, tsb_map_vaddr)) ||
2762 		     (TSB_CONFIG_MAP_PTE !=
2763 		      offsetof(struct tsb_config, tsb_map_pte)));
2764 
2765 	/* Attach to the address space of init_task.  On SMP we
2766 	 * do this in smp.c:smp_callin for other cpus.
2767 	 */
2768 	atomic_inc(&init_mm.mm_count);
2769 	current->active_mm = &init_mm;
2770 }
2771