// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1999, 2000  Philipp Rumpf <prumpf@tux.org>
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */

#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/assembly.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <asm/smp.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>

#include "../math-emu/math-emu.h"	/* for handle_fpe() */

static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl);

static int printbinary(char *buf, unsigned long x, int nbits)
{
	unsigned long mask = 1UL << (nbits - 1);
	while (mask != 0) {
		*buf++ = (mask & x ? '1' : '0');
		mask >>= 1;
	}
	*buf = '\0';

	return nbits;
}

#ifdef CONFIG_64BIT
#define RFMT "%016lx"
#else
#define RFMT "%08lx"
#endif
#define FFMT "%016llx"	/* fpregs are 64-bit always */

#define PRINTREGS(lvl,r,f,fmt,x)	\
	printk("%s%s%02d-%02d  " fmt " " fmt " " fmt " " fmt "\n",	\
		lvl, f, (x), (x+3), (r)[(x)+0], (r)[(x)+1],		\
		(r)[(x)+2], (r)[(x)+3])

static void print_gr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];

	printk("%s\n", level);
	printk("%s     YZrvWESTHLNXBCVMcbcbcbcbOGFRQPDI\n", level);
	printbinary(buf, regs->gr[0], 32);
	printk("%sPSW: %s %s\n", level, buf, print_tainted());

	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->gr, "r", RFMT, i);
}

static void print_fr(const char *level, struct pt_regs *regs)
{
	int i;
	char buf[64];
	struct { u32 sw[2]; } s;

	/* FR are 64bit everywhere. Need to use asm to get the content
	 * of fpsr/fper1, and we assume that we won't have a FP Identify
	 * in our way, otherwise we're screwed.
	 * The fldd is used to restore the T-bit if there was one, as the
	 * store clears it anyway.
	 * PA2.0 book says "thou shall not use fstw on FPSR/FPERs" - T-Bone */
	asm volatile ("fstd %%fr0,0(%1)	\n\t"
		      "fldd 0(%1),%%fr0	\n\t"
		      : "=m" (s) : "r" (&s) : "r0");

	printk("%s\n", level);
	printk("%s      VZOUICununcqcqcqcqcqcrmunTDVZOUI\n", level);
	printbinary(buf, s.sw[0], 32);
	printk("%sFPSR: %s\n", level, buf);
	printk("%sFPER1: %08x\n", level, s.sw[1]);

	/* here we'll print fr0 again, tho it'll be meaningless */
	for (i = 0; i < 32; i += 4)
		PRINTREGS(level, regs->fr, "fr", FFMT, i);
}

void show_regs(struct pt_regs *regs)
{
	int i, user;
	const char *level;
	unsigned long cr30, cr31;

	user = user_mode(regs);
	level = user ? KERN_DEBUG : KERN_CRIT;

	show_regs_print_info(level);

	print_gr(level, regs);

	for (i = 0; i < 8; i += 4)
		PRINTREGS(level, regs->sr, "sr", RFMT, i);

	if (user)
		print_fr(level, regs);

	cr30 = mfctl(30);
	cr31 = mfctl(31);
	printk("%s\n", level);
	printk("%sIASQ: " RFMT " " RFMT " IAOQ: " RFMT " " RFMT "\n",
	       level, regs->iasq[0], regs->iasq[1], regs->iaoq[0], regs->iaoq[1]);
	printk("%s IIR: %08lx    ISR: " RFMT "  IOR: " RFMT "\n",
	       level, regs->iir, regs->isr, regs->ior);
	printk("%s CPU: %8d   CR30: " RFMT " CR31: " RFMT "\n",
	       level, current_thread_info()->cpu, cr30, cr31);
	printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);

	if (user) {
		printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
		printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
		printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
	} else {
		printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
		printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
		printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);

		parisc_show_stack(current, regs, KERN_DEFAULT);
	}
}

static DEFINE_RATELIMIT_STATE(_hppa_rs,
	DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);

#define parisc_printk_ratelimited(critical, regs, fmt, ...)	{	      \
	if ((critical || show_unhandled_signals) && __ratelimit(&_hppa_rs)) { \
		printk(fmt, ##__VA_ARGS__);				      \
		show_regs(regs);					      \
	}								      \
}


static void do_show_stack(struct unwind_frame_info *info, const char *loglvl)
{
	int i = 1;

	printk("%sBacktrace:\n", loglvl);
	while (i <= MAX_UNWIND_ENTRIES) {
		if (unwind_once(info) < 0 || info->ip == 0)
			break;

		if (__kernel_text_address(info->ip)) {
			printk("%s [<" RFMT ">] %pS\n",
				loglvl, info->ip, (void *) info->ip);
			i++;
		}
	}
	printk("%s\n", loglvl);
}

static void parisc_show_stack(struct task_struct *task,
	struct pt_regs *regs, const char *loglvl)
{
	struct unwind_frame_info info;

	unwind_frame_init_task(&info, task, regs);

	do_show_stack(&info, loglvl);
}

void show_stack(struct task_struct *t, unsigned long *sp, const char *loglvl)
{
	parisc_show_stack(t, NULL, loglvl);
}

int is_valid_bugaddr(unsigned long iaoq)
{
	return 1;
}

void die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs)) {
		if (err == 0)
			return; /* STFU */

		parisc_printk_ratelimited(1, regs,
			KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
			current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);

		return;
	}

	bust_spinlocks(1);

	oops_enter();

	/* Amuse the user in a SPARC fashion */
	if (err) printk(KERN_CRIT
			"      _______________________________ \n"
			"     < Your System ate a SPARC! Gah! >\n"
			"      ------------------------------- \n"
			"             \\   ^__^\n"
			"                 (__)\\       )\\/\\\n"
			"                  U  ||----w |\n"
			"                     ||     ||\n");

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* maybe the kernel hasn't booted very far yet and hasn't been able
	 * to initialize the serial or STI console. In that case we should
	 * re-enable the pdc console, so that the user will be able to
	 * identify the problem. */
	if (!console_drivers)
		pdc_console_restart();

	if (err)
		printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
			current->comm, task_pid_nr(current), str, err);

	/* Wot's wrong wif bein' racy? */
	if (current->thread.flags & PARISC_KERNEL_DEATH) {
		printk(KERN_CRIT "%s() recursion detected.\n", __func__);
		local_irq_enable();
		while (1);
	}
	current->thread.flags |= PARISC_KERNEL_DEATH;

	show_regs(regs);
	dump_stack();
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	oops_exit();
	do_exit(SIGSEGV);
}

/* gdb uses break 4,8 */
#define GDB_BREAK_INSN 0x10004
static void handle_gdb_break(struct pt_regs *regs, int wot)
{
	force_sig_fault(SIGTRAP, wot,
			(void __user *) (regs->iaoq[0] & ~3));
}

static void handle_break(struct pt_regs *regs)
{
	unsigned iir = regs->iir;

	if (unlikely(iir == PARISC_BUG_BREAK_INSN && !user_mode(regs))) {
		/* check if a BUG() or WARN() trapped here.  */
		enum bug_trap_type tt;
		tt = report_bug(regs->iaoq[0] & ~3, regs);
		if (tt == BUG_TRAP_TYPE_WARN) {
			regs->iaoq[0] += 4;
			regs->iaoq[1] += 4;
			return; /* return to next instruction when WARN_ON().  */
		}
		die_if_kernel("Unknown kernel breakpoint", regs,
			(tt == BUG_TRAP_TYPE_NONE) ? 9 : 0);
	}

#ifdef CONFIG_KPROBES
	if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
		parisc_kprobe_break_handler(regs);
		return;
	}

#endif

#ifdef CONFIG_KGDB
	if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
		iir == PARISC_KGDB_BREAK_INSN)) {
		kgdb_handle_exception(9, SIGTRAP, 0, regs);
		return;
	}
#endif

	if (unlikely(iir != GDB_BREAK_INSN))
		parisc_printk_ratelimited(0, regs,
			KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
			iir & 31, (iir>>13) & ((1<<13)-1),
			task_pid_nr(current), current->comm);

	/* send standard GDB signal */
	handle_gdb_break(regs, TRAP_BRKPT);
}

static void default_trap(int code, struct pt_regs *regs)
{
	printk(KERN_ERR "Trap %d on CPU %d\n", code, smp_processor_id());
	show_regs(regs);
}

void (*cpu_lpmc) (int code, struct pt_regs *regs) __read_mostly = default_trap;


void transfer_pim_to_trap_frame(struct pt_regs *regs)
{
    register int i;
    extern unsigned int hpmc_pim_data[];
    struct pdc_hpmc_pim_11 *pim_narrow;
    struct pdc_hpmc_pim_20 *pim_wide;

    if (boot_cpu_data.cpu_type >= pcxu) {

	pim_wide = (struct pdc_hpmc_pim_20 *)hpmc_pim_data;

	/*
	 * Note: The following code will probably generate a
	 * bunch of truncation error warnings from the compiler.
	 * Could be handled with an ifdef, but perhaps there
	 * is a better way.
	 */

	regs->gr[0] = pim_wide->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_wide->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_wide->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_wide->sr[i];

	regs->iasq[0] = pim_wide->cr[17];
	regs->iasq[1] = pim_wide->iasq_back;
	regs->iaoq[0] = pim_wide->cr[18];
	regs->iaoq[1] = pim_wide->iaoq_back;

	regs->sar  = pim_wide->cr[11];
	regs->iir  = pim_wide->cr[19];
	regs->isr  = pim_wide->cr[20];
	regs->ior  = pim_wide->cr[21];
    }
    else {
	pim_narrow = (struct pdc_hpmc_pim_11 *)hpmc_pim_data;

	regs->gr[0] = pim_narrow->cr[22];

	for (i = 1; i < 32; i++)
	    regs->gr[i] = pim_narrow->gr[i];

	for (i = 0; i < 32; i++)
	    regs->fr[i] = pim_narrow->fr[i];

	for (i = 0; i < 8; i++)
	    regs->sr[i] = pim_narrow->sr[i];

	regs->iasq[0] = pim_narrow->cr[17];
	regs->iasq[1] = pim_narrow->iasq_back;
	regs->iaoq[0] = pim_narrow->cr[18];
	regs->iaoq[1] = pim_narrow->iaoq_back;

	regs->sar  = pim_narrow->cr[11];
	regs->iir  = pim_narrow->cr[19];
	regs->isr  = pim_narrow->cr[20];
	regs->ior  = pim_narrow->cr[21];
    }

    /*
     * The following fields only have meaning if we came through
     * another path. So just zero them here.
     */

    regs->ksp = 0;
    regs->kpc = 0;
    regs->orig_r28 = 0;
}


/*
 * This routine is called as a last resort when everything else
 * has gone clearly wrong. We get called for faults in kernel space,
 * and HPMC's.
 */
void parisc_terminate(char *msg, struct pt_regs *regs, int code, unsigned long offset)
{
	static DEFINE_SPINLOCK(terminate_lock);

	(void)notify_die(DIE_OOPS, msg, regs, 0, code, SIGTRAP);
	bust_spinlocks(1);

	set_eiem(0);
	local_irq_disable();
	spin_lock(&terminate_lock);

	/* unlock the pdc lock if necessary */
	pdc_emergency_unlock();

	/* restart pdc console if necessary */
	if (!console_drivers)
		pdc_console_restart();

	/* Not all paths will gutter the processor... */
	switch(code){

	case 1:
		transfer_pim_to_trap_frame(regs);
		break;

	default:
		break;

	}

	{
		/* show_stack(NULL, (unsigned long *)regs->gr[30]); */
		struct unwind_frame_info info;
		unwind_frame_init(&info, current, regs);
		do_show_stack(&info, KERN_CRIT);
	}

	printk("\n");
	pr_crit("%s: Code=%d (%s) at addr " RFMT "\n",
		msg, code, trap_name(code), offset);
	show_regs(regs);

	spin_unlock(&terminate_lock);

	/* put soft power button back under hardware control;
	 * if the user had pressed it once at any time, the
	 * system will shut down immediately right here. */
	pdc_soft_power_button(0);

	/* Call kernel panic() so reboot timeouts work properly
	 * FIXME: This function should be on the list of
	 * panic notifiers, and we should call panic
	 * directly from the location that we wish.
	 * e.g. We should not call panic from
	 * parisc_terminate, but rather the other way around.
	 * This hack works, prints the panic message twice,
	 * and it enables reboot timers!
	 */
	panic(msg);
}

void notrace handle_interruption(int code, struct pt_regs *regs)
{
	unsigned long fault_address = 0;
	unsigned long fault_space = 0;
	int si_code;

	if (code == 1)
	    pdc_console_restart();  /* switch back to pdc if HPMC */
	else
	    local_irq_enable();

	/* Security check:
	 * If the priority level is still user, and the
	 * faulting space is not equal to the active space,
	 * then the user is attempting something in a space
	 * that does not belong to them. Kill the process.
	 *
	 * This is normally the situation when the user
	 * attempts to jump into the kernel space at the
	 * wrong offset, be it at the gateway page or a
	 * random location.
	 *
	 * We cannot normally signal the process because it
	 * could *be* on the gateway page, and processes
	 * executing on the gateway page can't have signals
	 * delivered.
	 *
	 * We merely readjust the address into the user's
	 * space, at a destination address of zero, and
	 * allow processing to continue.
	 */
	if (((unsigned long)regs->iaoq[0] & 3) &&
	    ((unsigned long)regs->iasq[0] != (unsigned long)regs->sr[7])) {
		/* Kill the user process later */
		regs->iaoq[0] = 0 | 3;
		regs->iaoq[1] = regs->iaoq[0] + 4;
		regs->iasq[0] = regs->iasq[1] = regs->sr[7];
		regs->gr[0] &= ~PSW_B;
		return;
	}

#if 0
	printk(KERN_CRIT "Interruption # %d\n", code);
#endif

	switch(code) {

	case  1:
		/* High-priority machine check (HPMC) */

		/* set up a new led state on systems shipped with a LED State panel */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_HPMC);

		parisc_terminate("High Priority Machine Check (HPMC)",
				regs, code, 0);
		/* NOT REACHED */

	case  2:
		/* Power failure interrupt */
		printk(KERN_CRIT "Power failure interrupt !\n");
		return;

	case  3:
		/* Recovery counter trap */
		regs->gr[0] &= ~PSW_R;

#ifdef CONFIG_KPROBES
		if (parisc_kprobe_ss_handler(regs))
			return;
#endif

#ifdef CONFIG_KGDB
		if (kgdb_single_step) {
			kgdb_handle_exception(0, SIGTRAP, 0, regs);
			return;
		}
#endif

		if (user_space(regs))
			handle_gdb_break(regs, TRAP_TRACE);
		/* else this must be the start of a syscall - just let it run */
		return;

	case  5:
		/* Low-priority machine check */
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_LPMC);

		flush_cache_all();
		flush_tlb_all();
		cpu_lpmc(5, regs);
		return;

	case  PARISC_ITLB_TRAP:
		/* Instruction TLB miss fault/Instruction page fault */
		fault_address = regs->iaoq[0];
		fault_space   = regs->iasq[0];
		break;

	case  8:
		/* Illegal instruction trap */
		die_if_kernel("Illegal instruction", regs, code);
		si_code = ILL_ILLOPC;
		goto give_sigill;

	case  9:
		/* Break instruction trap */
		handle_break(regs);
		return;

	case 10:
		/* Privileged operation trap */
		die_if_kernel("Privileged operation", regs, code);
		si_code = ILL_PRVOPC;
		goto give_sigill;

	case 11:
		/* Privileged register trap */
		if ((regs->iir & 0xffdfffe0) == 0x034008a0) {

			/* This is a MFCTL cr26/cr27 to gr instruction.
			 * PCXS traps on this, so we need to emulate it.
			 */

			if (regs->iir & 0x00200000)
				regs->gr[regs->iir & 0x1f] = mfctl(27);
			else
				regs->gr[regs->iir & 0x1f] = mfctl(26);

			regs->iaoq[0] = regs->iaoq[1];
			regs->iaoq[1] += 4;
			regs->iasq[0] = regs->iasq[1];
			return;
		}

		die_if_kernel("Privileged register usage", regs, code);
		si_code = ILL_PRVREG;
	give_sigill:
		force_sig_fault(SIGILL, si_code,
				(void __user *) regs->iaoq[0]);
		return;

	case 12:
		/* Overflow Trap, let the userland signal handler do the cleanup */
		force_sig_fault(SIGFPE, FPE_INTOVF,
				(void __user *) regs->iaoq[0]);
		return;

	case 13:
		/* Conditional Trap
		   The condition succeeds in an instruction which traps
		   on condition  */
		if(user_mode(regs)){
			/* Let userspace app figure it out from the insn pointed
			 * to by si_addr.
			 */
			force_sig_fault(SIGFPE, FPE_CONDTRAP,
					(void __user *) regs->iaoq[0]);
			return;
		}
		/* The kernel doesn't want to handle condition codes */
		break;

	case 14:
		/* Assist Exception Trap, i.e. floating point exception. */
		die_if_kernel("Floating point exception", regs, 0); /* quiet */
		__inc_irq_stat(irq_fpassist_count);
		handle_fpe(regs);
		return;

	case 15:
		/* Data TLB miss fault/Data page fault */
		fallthrough;
	case 16:
		/* Non-access instruction TLB miss fault */
		/* The instruction TLB entry needed for the target address of the FIC
		   is absent, and hardware can't find it, so we get to cleanup */
		fallthrough;
	case 17:
		/* Non-access data TLB miss fault/Non-access data page fault */
		/* FIXME:
			 Still need to add slow path emulation code here!
			 If the insn used a non-shadow register, then the tlb
			 handlers could not have their side-effect (e.g. probe
			 writing to a target register) emulated since rfir would
			 erase the changes to said register. Instead we have to
			 setup everything, call this function we are in, and emulate
			 by hand. Technically we need to emulate:
			 fdc,fdce,pdc,"fic,4f",prober,probeir,probew, probeiw
		*/
		if (code == 17 && handle_nadtlb_fault(regs))
			return;
		fault_address = regs->ior;
		fault_space = regs->isr;
		break;

	case 18:
		/* PCXS only -- later cpu's split this into types 26,27 & 28 */
		/* Check for unaligned access */
		if (check_unaligned(regs)) {
			handle_unaligned(regs);
			return;
		}
		fallthrough;
	case 26:
		/* PCXL: Data memory access rights trap */
		fault_address = regs->ior;
		fault_space   = regs->isr;
		break;

	case 19:
		/* Data memory break trap */
		regs->gr[0] |= PSW_X; /* So we can single-step over the trap */
		fallthrough;
	case 21:
		/* Page reference trap */
		handle_gdb_break(regs, TRAP_HWBKPT);
		return;

	case 25:
		/* Taken branch trap */
		regs->gr[0] &= ~PSW_T;
		if (user_space(regs))
			handle_gdb_break(regs, TRAP_BRANCH);
		/* else this must be the start of a syscall - just let it
		 * run.
		 */
		return;

	case  7:
		/* Instruction access rights */
		/* PCXL: Instruction memory protection trap */

		/*
		 * This could be caused by either: 1) a process attempting
		 * to execute within a vma that does not have execute
		 * permission, or 2) an access rights violation caused by a
		 * flush only translation set up by ptep_get_and_clear().
		 * So we check the vma permissions to differentiate the two.
		 * If the vma indicates we have execute permission, then
		 * the cause is the latter one. In this case, we need to
		 * call do_page_fault() to fix the problem.
		 */

		if (user_mode(regs)) {
			struct vm_area_struct *vma;

			mmap_read_lock(current->mm);
			vma = find_vma(current->mm,regs->iaoq[0]);
			if (vma && (regs->iaoq[0] >= vma->vm_start)
				&& (vma->vm_flags & VM_EXEC)) {

				fault_address = regs->iaoq[0];
				fault_space = regs->iasq[0];

				mmap_read_unlock(current->mm);
				break; /* call do_page_fault() */
			}
			mmap_read_unlock(current->mm);
		}
		/* CPU could not fetch instruction, so clear stale IIR value. */
		regs->iir = 0xbaadf00d;
		fallthrough;
	case 27:
		/* Data memory protection ID trap */
		if (code == 27 && !user_mode(regs) &&
			fixup_exception(regs))
			return;

		die_if_kernel("Protection id trap", regs, code);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(code == 7)?
				((void __user *) regs->iaoq[0]) :
				((void __user *) regs->ior));
		return;

	case 28:
		/* Unaligned data reference trap */
		handle_unaligned(regs);
		return;

	default:
		if (user_mode(regs)) {
			parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"handle_interruption() pid=%d command='%s'\n",
				task_pid_nr(current), current->comm);
			/* SIGBUS, for lack of a better one. */
			force_sig_fault(SIGBUS, BUS_OBJERR,
					(void __user *)regs->ior);
			return;
		}
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);

		parisc_terminate("Unexpected interruption", regs, code, 0);
		/* NOT REACHED */
	}

	if (user_mode(regs)) {
	    if ((fault_space >> SPACEID_SHIFT) != (regs->sr[7] >> SPACEID_SHIFT)) {
		parisc_printk_ratelimited(0, regs, KERN_DEBUG
				"User fault %d on space 0x%08lx, pid=%d command='%s'\n",
				code, fault_space,
				task_pid_nr(current), current->comm);
		force_sig_fault(SIGSEGV, SEGV_MAPERR,
				(void __user *)regs->ior);
		return;
	    }
	}
	else {

	    /*
	     * The kernel should never fault on its own address space,
	     * unless pagefault_disable() was called before.
	     */

	    if (faulthandler_disabled() || fault_space == 0)
	    {
		/* Clean up and return if in exception table. */
		if (fixup_exception(regs))
			return;
		pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
		parisc_terminate("Kernel Fault", regs, code, fault_address);
	    }
	}

	do_page_fault(regs, code, fault_address);
}


void __init initialize_ivt(const void *iva)
{
	extern u32 os_hpmc_size;
	extern const u32 os_hpmc[];

	int i;
	u32 check = 0;
	u32 *ivap;
	u32 *hpmcp;
	u32 length, instr;

	if (strcmp((const char *)iva, "cows can fly"))
		panic("IVT invalid");

	ivap = (u32 *)iva;

	for (i = 0; i < 8; i++)
	    *ivap++ = 0;

	/*
	 * Use PDC_INSTR firmware function to get instruction that invokes
	 * PDCE_CHECK in HPMC handler.  See programming note at page 1-31 of
	 * the PA 1.1 Firmware Architecture document.
	 */
	if (pdc_instr(&instr) == PDC_OK)
		ivap[0] = instr;

	/*
	 * Rules for the checksum of the HPMC handler:
	 * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
	 *    its own IVA).
	 * 2. The word at IVA + 32 is nonzero.
	 * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
	 *    Address (IVA + 56) are word-aligned.
	 * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
	 *    the Length/4 words starting at Address is zero.
	 */

	/* Setup IVA and compute checksum for HPMC handler */
	ivap[6] = (u32)__pa(os_hpmc);
	length = os_hpmc_size;
	ivap[7] = length;

	hpmcp = (u32 *)os_hpmc;

	for (i=0; i<length/4; i++)
	    check += *hpmcp++;

	for (i=0; i<8; i++)
	    check += ivap[i];

	ivap[5] = -check;
}


/* early_trap_init() is called before we set up kernel mappings and
 * write-protect the kernel */
void  __init early_trap_init(void)
{
	extern const void fault_vector_20;

#ifndef CONFIG_64BIT
	extern const void fault_vector_11;
	initialize_ivt(&fault_vector_11);
#endif

	initialize_ivt(&fault_vector_20);
}

void __init trap_init(void)
{
}