/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>

#include <asm/assembly.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
			  /*  dumped to the console via printk)          */

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
DEFINE_SPINLOCK(pa_dbit_lock);
#endif

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs);

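/* Render the low nbits of x into buf as an ASCII '0'/'1' string (NUL-terminated). */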
static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, NULL, regs);
	}
}

static void do_show_stack(struct unwind_frame_info *info)
{
	int i = 1;

	printk(KERN_CRIT "Backtrace:\n");
	while (i <= 16) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk(KERN_CRIT " [<" RFMT ">] %pS\n",
				info->ip, (void *) info->ip);
			i++;
		}
	}
	printk(KERN_CRIT "\n");
}

static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
	struct pt_regs *regs)
{
	struct unwind_frame_info info;
	struct task_struct *t;

	t = task ? task : current;
	if (regs) {
		unwind_frame_init(&info, t, regs);
		goto show_stack;
	}

	if (t == current) {
		unsigned long sp;

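		/* Unwinding the current task: synthesize a minimal pt_regs
		 * from the live stack pointer, this code address, and our own
		 * return address, then hand it to the unwinder. */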
HERE:
		asm volatile ("copy %%r30, %0" : "=r"(sp));
		{
			struct pt_regs r;

			memset(&r, 0, sizeof(struct pt_regs));
			r.iaoq[0] = (unsigned long)&&HERE;
			r.gr[2] = (unsigned long)__builtin_return_address(0);
			r.gr[30] = sp;

			unwind_frame_init(&info, current, &r);
		}
	} else {
		unwind_frame_init_from_blocked_task(&info, t);
	}

show_stack:
	do_show_stack(&info);
}

void show_stack(struct task_struct *t, unsigned long *sp)
{
	return parisc_show_stack(t, sp, NULL);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

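/* Report a fault: in user mode just log it (when err is non-zero); in
 * kernel mode, print an oops, taint the kernel and kill the current task. */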
void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
		/* XXX for debugging only */
		show_regs(regs);
#endif
		return;
	}

	oops_in_progress = 1;

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			"      _______________________________ \n"
			"     < Your System ate a SPARC! Gah! >\n"
			"      ------------------------------- \n"
			"             \\   ^__^\n"
			"                 (__)\\       )\\/\\\n"
			"                  U  ||----w |\n"
			"                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
		ssleep(5);
		panic("Fatal exception");
	}

	oops_exit();
	do_exit(SIGSEGV);
}

int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
{
	return syscall(regs);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	struct siginfo si;

	si.si_signo = SIGTRAP;
	si.si_errno = 0;
	si.si_code = wot;
	si.si_addr = (void __user *) (regs->iaoq[0] & ~3);
	force_sig_info(SIGTRAP, &si, current);
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here.  */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef PRINT_USER_FAULTS
	if (unlikely(iir != GDB_BREAK_INSN)) {
		printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);
		show_regs(regs);
	}
#endif

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


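/* Copy the PIM (Processor Internal Memory) dump captured after an HPMC into
 * a pt_regs so it can be reported like an ordinary trap frame.  Wide
 * (PA2.0, pcxu and later) and narrow (PA1.1) machines use different PIM
 * layouts. */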
void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

    if (boot_cpu_data.cpu_type >= pcxu) {

	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

	/*
	 * Note: The following code will probably generate a
	 * bunch of truncation error warnings from the compiler.
	 * Could be handled with an ifdef, but perhaps there
	 * is a better way.
	 */

	regs->gr[0] = pim_wide->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_wide->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_wide->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_wide->sr[i];

	regs->iasq[0] = pim_wide->cr[17];
	regs->iasq[1] = pim_wide->iasq_back;
	regs->iaoq[0] = pim_wide->cr[18];
	regs->iaoq[1] = pim_wide->iaoq_back;

	regs->sar  = pim_wide->cr[11];
	regs->iir  = pim_wide->cr[19];
	regs->isr  = pim_wide->cr[20];
	regs->ior  = pim_wide->cr[21];
    }
    else {
	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

	regs->gr[0] = pim_narrow->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_narrow->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_narrow->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_narrow->sr[i];

	regs->iasq[0] = pim_narrow->cr[17];
	regs->iasq[1] = pim_narrow->iasq_back;
	regs->iaoq[0] = pim_narrow->cr[18];
	regs->iaoq[1] = pim_narrow->iaoq_back;

	regs->sar  = pim_narrow->cr[11];
	regs->iir  = pim_narrow->cr[19];
	regs->isr  = pim_narrow->cr[20];
	regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	oops_in_progress = 1;

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		/* Fall through */
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info);
	}

	printk("\n");
	printk(KERN_CRIT "%s: Code=%d regs=%p (Addr=" RFMT ")\n",
			msg, code, regs, offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

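/*
 * Main trap dispatcher, called from the low-level interruption vectors
 * with the trap number in 'code'.  Traps that cannot be handled in line
 * fall through to do_page_fault() with fault_address/fault_space set.
 */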
void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	struct siginfo si;

	if (code == 1)
	    pdc_console_restart();  /* switch back to pdc if HPMC */
	else
	    local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case  6:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si.si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si.si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si.si_code = ILL_PRVREG;
	give_sigill:
		si.si_signo = SIGILL;
		si.si_errno = 0;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGILL, &si, current);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		si.si_signo = SIGFPE;
		si.si_code = FPE_INTOVF;
		si.si_addr = (void __user *) regs->iaoq[0];
		force_sig_info(SIGFPE, &si, current);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			si.si_signo = SIGFPE;
			/* Set to zero, and let the userspace app figure it out from
			   the insn pointed to by si_addr */
			si.si_code = 0;
			si.si_addr = (void __user *) regs->iaoq[0];
			force_sig_info(SIGFPE, &si, current);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		/* Fall through */
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		/* Fall through */
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		/* Fall Through */
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		/* fall thru */
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			down_read(&current->mm->mmap_sem);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				up_read(&current->mm->mmap_sem);
				break; /* call do_page_fault() */
			}
			up_read(&current->mm->mmap_sem);
		}
		/* Fall Through */
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		si.si_code = SEGV_MAPERR;
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		if (code == 7)
		    si.si_addr = (void __user *) regs->iaoq[0];
		else
		    si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
#ifdef PRINT_USER_FAULTS
			printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
			    task_pid_nr(current), current->comm);
			show_regs(regs);
#endif
			/* SIGBUS, for lack of a better one. */
			si.si_signo = SIGBUS;
			si.si_code = BUS_OBJERR;
			si.si_errno = 0;
			si.si_addr = (void __user *) regs->ior;
			force_sig_info(SIGBUS, &si, current);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
#ifdef PRINT_USER_FAULTS
		if (fault_space == 0)
			printk(KERN_DEBUG "User Fault on Kernel Space ");
		else
			printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
			       code);
		printk(KERN_CONT "pid=%d command='%s'\n",
		       task_pid_nr(current), current->comm);
		show_regs(regs);
#endif
		si.si_signo = SIGSEGV;
		si.si_errno = 0;
		si.si_code = SEGV_MAPERR;
		si.si_addr = (void __user *) regs->ior;
		force_sig_info(SIGSEGV, &si, current);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space.
	     */

	    if (fault_space == 0)
	    {
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);

	    }
	}

	do_page_fault(regs, code, fault_address);
}


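/* Sanity-check the interrupt vector table (it must begin with the expected
 * magic string) and fill in the length and checksum words for the HPMC
 * handler. */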
int __init check_ivt(void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length;

	if (strcmp((char *)iva, "cows can fly"))
		return -1;

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
	    *ivap++ = 0;

	/* Compute Checksum for HPMC handler */
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
	    check += *hpmcp++;

	for (i=0; i<8; i++)
	    check += ivap[i];

	ivap[5] = -check;

	return 0;
}

#ifndef CONFIG_64BIT
extern const void fault_vector_11;
#endif
extern const void fault_vector_20;

void __init trap_init(void)
{
	void *iva;

	if (boot_cpu_data.cpu_type >= pcxu)
		iva = (void *) &fault_vector_20;
	else
#ifdef CONFIG_64BIT
		panic("Can't boot 64-bit OS on PA1.1 processor!");
#else
		iva = (void *) &fault_vector_11;
#endif

	if (check_ivt(iva))
		panic("IVT invalid");
}