1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7 * Copyright (C) 1995, 1996 Paul M. Antoine
8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
13 */
14 #include <linux/bug.h>
15 #include <linux/compiler.h>
16 #include <linux/context_tracking.h>
17 #include <linux/kexec.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mm.h>
22 #include <linux/sched.h>
23 #include <linux/smp.h>
24 #include <linux/spinlock.h>
25 #include <linux/kallsyms.h>
26 #include <linux/bootmem.h>
27 #include <linux/interrupt.h>
28 #include <linux/ptrace.h>
29 #include <linux/kgdb.h>
30 #include <linux/kdebug.h>
31 #include <linux/kprobes.h>
32 #include <linux/notifier.h>
33 #include <linux/kdb.h>
34 #include <linux/irq.h>
35 #include <linux/perf_event.h>
36
37 #include <asm/bootinfo.h>
38 #include <asm/branch.h>
39 #include <asm/break.h>
40 #include <asm/cop2.h>
41 #include <asm/cpu.h>
42 #include <asm/dsp.h>
43 #include <asm/fpu.h>
44 #include <asm/fpu_emulator.h>
45 #include <asm/idle.h>
46 #include <asm/mipsregs.h>
47 #include <asm/mipsmtregs.h>
48 #include <asm/module.h>
49 #include <asm/msa.h>
50 #include <asm/pgtable.h>
51 #include <asm/ptrace.h>
52 #include <asm/sections.h>
53 #include <asm/tlbdebug.h>
54 #include <asm/traps.h>
55 #include <asm/uaccess.h>
56 #include <asm/watch.h>
57 #include <asm/mmu_context.h>
58 #include <asm/types.h>
59 #include <asm/stacktrace.h>
60 #include <asm/uasm.h>
61
62 extern void check_wait(void);
63 extern asmlinkage void rollback_handle_int(void);
64 extern asmlinkage void handle_int(void);
65 extern u32 handle_tlbl[];
66 extern u32 handle_tlbs[];
67 extern u32 handle_tlbm[];
68 extern asmlinkage void handle_adel(void);
69 extern asmlinkage void handle_ades(void);
70 extern asmlinkage void handle_ibe(void);
71 extern asmlinkage void handle_dbe(void);
72 extern asmlinkage void handle_sys(void);
73 extern asmlinkage void handle_bp(void);
74 extern asmlinkage void handle_ri(void);
75 extern asmlinkage void handle_ri_rdhwr_vivt(void);
76 extern asmlinkage void handle_ri_rdhwr(void);
77 extern asmlinkage void handle_cpu(void);
78 extern asmlinkage void handle_ov(void);
79 extern asmlinkage void handle_tr(void);
80 extern asmlinkage void handle_msa_fpe(void);
81 extern asmlinkage void handle_fpe(void);
82 extern asmlinkage void handle_ftlb(void);
83 extern asmlinkage void handle_msa(void);
84 extern asmlinkage void handle_mdmx(void);
85 extern asmlinkage void handle_watch(void);
86 extern asmlinkage void handle_mt(void);
87 extern asmlinkage void handle_dsp(void);
88 extern asmlinkage void handle_mcheck(void);
89 extern asmlinkage void handle_reserved(void);
90
91 void (*board_be_init)(void);
92 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
93 void (*board_nmi_handler_setup)(void);
94 void (*board_ejtag_handler_setup)(void);
95 void (*board_bind_eic_interrupt)(int irq, int regset);
96 void (*board_ebase_setup)(void);
97 void __cpuinitdata(*board_cache_error_setup)(void);
98
99 static void show_raw_backtrace(unsigned long reg29)
100 {
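/* reg29 is the saved stack pointer ($29/sp); mask the low bits to word-align it before walking the stack. */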
101 unsigned long *sp = (unsigned long *)(reg29 & ~3);
102 unsigned long addr;
103
104 printk("Call Trace:");
105 #ifdef CONFIG_KALLSYMS
106 printk("\n");
107 #endif
108 while (!kstack_end(sp)) {
109 unsigned long __user *p =
110 (unsigned long __user *)(unsigned long)sp++;
111 if (__get_user(addr, p)) {
112 printk(" (Bad stack address)");
113 break;
114 }
115 if (__kernel_text_address(addr))
116 print_ip_sym(addr);
117 }
118 printk("\n");
119 }
120
121 #ifdef CONFIG_KALLSYMS
122 int raw_show_trace;
123 static int __init set_raw_show_trace(char *str)
124 {
125 raw_show_trace = 1;
126 return 1;
127 }
128 __setup("raw_show_trace", set_raw_show_trace);
129 #endif
130
131 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
132 {
133 unsigned long sp = regs->regs[29];
134 unsigned long ra = regs->regs[31];
135 unsigned long pc = regs->cp0_epc;
136
137 if (!task)
138 task = current;
139
140 if (raw_show_trace || !__kernel_text_address(pc)) {
141 show_raw_backtrace(sp);
142 return;
143 }
144 printk("Call Trace:\n");
145 do {
146 print_ip_sym(pc);
147 pc = unwind_stack(task, &sp, pc, &ra);
148 } while (pc);
149 printk("\n");
150 }
151
152 /*
153 * This routine abuses get_user()/put_user() to reference pointers
154 * with at least a bit of error checking ...
155 */
156 static void show_stacktrace(struct task_struct *task,
157 const struct pt_regs *regs)
158 {
159 const int field = 2 * sizeof(unsigned long);
160 long stackdata;
161 int i;
162 unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
163
164 printk("Stack :");
165 i = 0;
166 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
167 if (i && ((i % (64 / field)) == 0))
168 printk("\n ");
169 if (i > 39) {
170 printk(" ...");
171 break;
172 }
173
174 if (__get_user(stackdata, sp++)) {
175 printk(" (Bad stack address)");
176 break;
177 }
178
179 printk(" %0*lx", field, stackdata);
180 i++;
181 }
182 printk("\n");
183 show_backtrace(task, regs);
184 }
185
186 void show_stack(struct task_struct *task, unsigned long *sp)
187 {
188 struct pt_regs regs;
189 if (sp) {
190 regs.regs[29] = (unsigned long)sp;
191 regs.regs[31] = 0;
192 regs.cp0_epc = 0;
193 } else {
194 if (task && task != current) {
195 regs.regs[29] = task->thread.reg29;
196 regs.regs[31] = 0;
197 regs.cp0_epc = task->thread.reg31;
198 #ifdef CONFIG_KGDB_KDB
199 } else if (atomic_read(&kgdb_active) != -1 &&
200 kdb_current_regs) {
201 memcpy(&regs, kdb_current_regs, sizeof(regs));
202 #endif /* CONFIG_KGDB_KDB */
203 } else {
204 prepare_frametrace(&regs);
205 }
206 }
207 show_stacktrace(task, &regs);
208 }
209
210 static void show_code(unsigned int __user *pc)
211 {
212 long i;
213 unsigned short __user *pc16 = NULL;
214
215 printk("\nCode:");
216
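/* An EPC with bit 0 set means the code is MIPS16e/microMIPS, so dump 16-bit halfwords rather than 32-bit words. */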
217 if ((unsigned long)pc & 1)
218 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
219 for(i = -3 ; i < 6 ; i++) {
220 unsigned int insn;
221 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
222 printk(" (Bad address in epc)\n");
223 break;
224 }
225 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
226 }
227 }
228
229 static void __show_regs(const struct pt_regs *regs)
230 {
231 const int field = 2 * sizeof(unsigned long);
232 unsigned int cause = regs->cp0_cause;
233 int i;
234
235 show_regs_print_info(KERN_DEFAULT);
236
237 /*
238 * Saved main processor registers
239 */
240 for (i = 0; i < 32; ) {
241 if ((i % 4) == 0)
242 printk("$%2d :", i);
243 if (i == 0)
244 printk(" %0*lx", field, 0UL);
245 else if (i == 26 || i == 27)
246 printk(" %*s", field, "");
247 else
248 printk(" %0*lx", field, regs->regs[i]);
249
250 i++;
251 if ((i % 4) == 0)
252 printk("\n");
253 }
254
255 #ifdef CONFIG_CPU_HAS_SMARTMIPS
256 printk("Acx : %0*lx\n", field, regs->acx);
257 #endif
258 printk("Hi : %0*lx\n", field, regs->hi);
259 printk("Lo : %0*lx\n", field, regs->lo);
260
261 /*
262 * Saved cp0 registers
263 */
264 printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
265 (void *) regs->cp0_epc);
266 printk(" %s\n", print_tainted());
267 printk("ra : %0*lx %pS\n", field, regs->regs[31],
268 (void *) regs->regs[31]);
269
270 printk("Status: %08x ", (uint32_t) regs->cp0_status);
271
272 if (raw_current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
273 if (regs->cp0_status & ST0_KUO)
274 printk("KUo ");
275 if (regs->cp0_status & ST0_IEO)
276 printk("IEo ");
277 if (regs->cp0_status & ST0_KUP)
278 printk("KUp ");
279 if (regs->cp0_status & ST0_IEP)
280 printk("IEp ");
281 if (regs->cp0_status & ST0_KUC)
282 printk("KUc ");
283 if (regs->cp0_status & ST0_IEC)
284 printk("IEc ");
285 } else {
286 if (regs->cp0_status & ST0_KX)
287 printk("KX ");
288 if (regs->cp0_status & ST0_SX)
289 printk("SX ");
290 if (regs->cp0_status & ST0_UX)
291 printk("UX ");
292 switch (regs->cp0_status & ST0_KSU) {
293 case KSU_USER:
294 printk("USER ");
295 break;
296 case KSU_SUPERVISOR:
297 printk("SUPERVISOR ");
298 break;
299 case KSU_KERNEL:
300 printk("KERNEL ");
301 break;
302 default:
303 printk("BAD_MODE ");
304 break;
305 }
306 if (regs->cp0_status & ST0_ERL)
307 printk("ERL ");
308 if (regs->cp0_status & ST0_EXL)
309 printk("EXL ");
310 if (regs->cp0_status & ST0_IE)
311 printk("IE ");
312 }
313 printk("\n");
314
315 printk("Cause : %08x\n", cause);
316
317 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
318 if (1 <= cause && cause <= 5)
319 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
320
321 printk("PrId : %08x (%s)\n", read_c0_prid(),
322 cpu_name_string());
323 }
324
325 /*
326 * FIXME: really the generic show_regs should take a const pointer argument.
327 */
328 void show_regs(struct pt_regs *regs)
329 {
330 __show_regs((struct pt_regs *)regs);
331 }
332
333 void show_registers(struct pt_regs *regs)
334 {
335 const int field = 2 * sizeof(unsigned long);
336 mm_segment_t old_fs = get_fs();
337
338 __show_regs(regs);
339 print_modules();
340 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
341 current->comm, current->pid, current_thread_info(), current,
342 field, current_thread_info()->tp_value);
343 if (cpu_has_userlocal) {
344 unsigned long tls;
345
346 tls = read_c0_userlocal();
347 if (tls != current_thread_info()->tp_value)
348 printk("*HwTLS: %0*lx\n", field, tls);
349 }
350
351 if (!user_mode(regs))
352 set_fs(KERNEL_DS);
353 show_stacktrace(current, regs);
354 show_code((unsigned int __user *) regs->cp0_epc);
355 printk("\n");
356 set_fs(old_fs);
357 }
358
359 static int regs_to_trapnr(struct pt_regs *regs)
360 {
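/* The exception code (ExcCode) lives in bits [6:2] of the Cause register. */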
361 return (regs->cp0_cause >> 2) & 0x1f;
362 }
363
364 static DEFINE_RAW_SPINLOCK(die_lock);
365
366 void __noreturn die(const char *str, struct pt_regs *regs)
367 {
368 static int die_counter;
369 int sig = SIGSEGV;
370 #ifdef CONFIG_MIPS_MT_SMTC
371 unsigned long dvpret;
372 #endif /* CONFIG_MIPS_MT_SMTC */
373
374 oops_enter();
375
376 if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
377 sig = 0;
378
379 console_verbose();
380 raw_spin_lock_irq(&die_lock);
381 #ifdef CONFIG_MIPS_MT_SMTC
382 dvpret = dvpe();
383 #endif /* CONFIG_MIPS_MT_SMTC */
384 bust_spinlocks(1);
385 #ifdef CONFIG_MIPS_MT_SMTC
386 mips_mt_regdump(dvpret);
387 #endif /* CONFIG_MIPS_MT_SMTC */
388
389 printk("%s[#%d]:\n", str, ++die_counter);
390 show_registers(regs);
391 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
392 raw_spin_unlock_irq(&die_lock);
393
394 oops_exit();
395
396 if (in_interrupt())
397 panic("Fatal exception in interrupt");
398
399 if (panic_on_oops) {
400 printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
401 ssleep(5);
402 panic("Fatal exception");
403 }
404
405 if (regs && kexec_should_crash(current))
406 crash_kexec(regs);
407
408 do_exit(sig);
409 }
410
411 extern struct exception_table_entry __start___dbe_table[];
412 extern struct exception_table_entry __stop___dbe_table[];
413
414 __asm__(
415 " .section __dbe_table, \"a\"\n"
416 " .previous \n");
417
418 /* Given an address, look for it in the exception tables. */
419 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
420 {
421 const struct exception_table_entry *e;
422
423 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
424 if (!e)
425 e = search_module_dbetables(addr);
426 return e;
427 }
428
429 asmlinkage void do_be(struct pt_regs *regs)
430 {
431 const int field = 2 * sizeof(unsigned long);
432 const struct exception_table_entry *fixup = NULL;
433 int data = regs->cp0_cause & 4;
434 int action = MIPS_BE_FATAL;
435 enum ctx_state prev_state;
436
437 prev_state = exception_enter();
438 /* XXX For now. Fixme, this searches the wrong table ... */
439 if (data && !user_mode(regs))
440 fixup = search_dbe_tables(exception_epc(regs));
441
442 if (fixup)
443 action = MIPS_BE_FIXUP;
444
445 if (board_be_handler)
446 action = board_be_handler(regs, fixup != NULL);
447
448 switch (action) {
449 case MIPS_BE_DISCARD:
450 goto out;
451 case MIPS_BE_FIXUP:
452 if (fixup) {
453 regs->cp0_epc = fixup->nextinsn;
454 goto out;
455 }
456 break;
457 default:
458 break;
459 }
460
461 /*
462 * Assume it would be too dangerous to continue ...
463 */
464 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
465 data ? "Data" : "Instruction",
466 field, regs->cp0_epc, field, regs->regs[31]);
467 if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
468 == NOTIFY_STOP)
469 goto out;
470
471 die_if_kernel("Oops", regs);
472 force_sig(SIGBUS, current);
473
474 out:
475 exception_exit(prev_state);
476 }
477
478 /*
479 * ll/sc, rdhwr, sync emulation
480 */
481
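/*
 * Masks for decoding the trapping instruction: the major opcode, the
 * base/rt register fields and 16-bit offset of I-type encodings, and the
 * SPEC0/SPEC3 function fields used to recognize sync and rdhwr.
 */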
482 #define OPCODE 0xfc000000
483 #define BASE 0x03e00000
484 #define RT 0x001f0000
485 #define OFFSET 0x0000ffff
486 #define LL 0xc0000000
487 #define SC 0xe0000000
488 #define SPEC0 0x00000000
489 #define SPEC3 0x7c000000
490 #define RD 0x0000f800
491 #define FUNC 0x0000003f
492 #define SYNC 0x0000000f
493 #define RDHWR 0x0000003b
494
495 /* microMIPS definitions */
496 #define MM_POOL32A_FUNC 0xfc00ffff
497 #define MM_RDHWR 0x00006b3c
498 #define MM_RS 0x001f0000
499 #define MM_RT 0x03e00000
500
501 /*
502 * The ll_bit is cleared by r*_switch.S
503 */
504
505 unsigned int ll_bit;
506 struct task_struct *ll_task;
507
508 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
509 {
510 unsigned long value, __user *vaddr;
511 long offset;
512
513 /*
514 * analyse the ll instruction that just caused a ri exception
515 * and compute the referenced address in vaddr.
516 */
517
518 /* sign extend offset */
519 offset = opcode & OFFSET;
520 offset <<= 16;
521 offset >>= 16;
522
523 vaddr = (unsigned long __user *)
524 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
525
526 if ((unsigned long)vaddr & 3)
527 return SIGBUS;
528 if (get_user(value, vaddr))
529 return SIGSEGV;
530
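/*
 * Record that this task holds the emulated LL bit. A context switch
 * clears ll_bit (see r*_switch.S), so a later emulated SC will fail
 * just as it would on real hardware.
 */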
531 preempt_disable();
532
533 if (ll_task == NULL || ll_task == current) {
534 ll_bit = 1;
535 } else {
536 ll_bit = 0;
537 }
538 ll_task = current;
539
540 preempt_enable();
541
542 regs->regs[(opcode & RT) >> 16] = value;
543
544 return 0;
545 }
546
547 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
548 {
549 unsigned long __user *vaddr;
550 unsigned long reg;
551 long offset;
552
553 /*
554 * analyse the sc instruction that just caused a ri exception
555 * and compute the referenced address in vaddr.
556 */
557
558 /* sign extend offset */
559 offset = opcode & OFFSET;
560 offset <<= 16;
561 offset >>= 16;
562
563 vaddr = (unsigned long __user *)
564 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
565 reg = (opcode & RT) >> 16;
566
567 if ((unsigned long)vaddr & 3)
568 return SIGBUS;
569
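/*
 * The emulated SC succeeds only if the most recent emulated LL was done
 * by this task and ll_bit is still set; otherwise rt is set to 0 to
 * report failure, matching the architectural behaviour.
 */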
570 preempt_disable();
571
572 if (ll_bit == 0 || ll_task != current) {
573 regs->regs[reg] = 0;
574 preempt_enable();
575 return 0;
576 }
577
578 preempt_enable();
579
580 if (put_user(regs->regs[reg], vaddr))
581 return SIGSEGV;
582
583 regs->regs[reg] = 1;
584
585 return 0;
586 }
587
588 /*
589 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
590 * opcodes are supposed to result in coprocessor unusable exceptions if
591 * executed on ll/sc-less processors. That's the theory. In practice a
592 * few processors such as NEC's VR4100 throw reserved instruction exceptions
593 * instead, so we're doing the emulation thing in both exception handlers.
594 */
595 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
596 {
597 if ((opcode & OPCODE) == LL) {
598 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
599 1, regs, 0);
600 return simulate_ll(regs, opcode);
601 }
602 if ((opcode & OPCODE) == SC) {
603 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
604 1, regs, 0);
605 return simulate_sc(regs, opcode);
606 }
607
608 return -1; /* Must be something else ... */
609 }
610
611 /*
612 * Simulate trapping 'rdhwr' instructions to provide user accessible
613 * registers not implemented in hardware.
614 */
615 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
616 {
617 struct thread_info *ti = task_thread_info(current);
618
619 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
620 1, regs, 0);
621 switch (rd) {
622 case 0: /* CPU number */
623 regs->regs[rt] = smp_processor_id();
624 return 0;
625 case 1: /* SYNCI length */
626 regs->regs[rt] = min(current_cpu_data.dcache.linesz,
627 current_cpu_data.icache.linesz);
628 return 0;
629 case 2: /* Read count register */
630 regs->regs[rt] = read_c0_count();
631 return 0;
632 case 3: /* Count register resolution */
633 switch (current_cpu_data.cputype) {
634 case CPU_20KC:
635 case CPU_25KF:
636 regs->regs[rt] = 1;
637 break;
638 default:
639 regs->regs[rt] = 2;
640 }
641 return 0;
642 case 29:
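/* Hardware register 29 is UserLocal (ULR), the thread pointer used for TLS. */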
643 regs->regs[rt] = ti->tp_value;
644 return 0;
645 default:
646 return -1;
647 }
648 }
649
650 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
651 {
652 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
653 int rd = (opcode & RD) >> 11;
654 int rt = (opcode & RT) >> 16;
655
656 simulate_rdhwr(regs, rd, rt);
657 return 0;
658 }
659
660 /* Not ours. */
661 return -1;
662 }
663
664 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
665 {
666 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
667 int rd = (opcode & MM_RS) >> 16;
668 int rt = (opcode & MM_RT) >> 21;
669 simulate_rdhwr(regs, rd, rt);
670 return 0;
671 }
672
673 /* Not ours. */
674 return -1;
675 }
676
677 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
678 {
679 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
680 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
681 1, regs, 0);
682 return 0;
683 }
684
685 return -1; /* Must be something else ... */
686 }
687
688 asmlinkage void do_ov(struct pt_regs *regs)
689 {
690 enum ctx_state prev_state;
691 siginfo_t info;
692
693 prev_state = exception_enter();
694 die_if_kernel("Integer overflow", regs);
695
696 info.si_code = FPE_INTOVF;
697 info.si_signo = SIGFPE;
698 info.si_errno = 0;
699 info.si_addr = (void __user *) regs->cp0_epc;
700 force_sig_info(SIGFPE, &info, current);
701 exception_exit(prev_state);
702 }
703
704 int process_fpemu_return(int sig, void __user *fault_addr)
705 {
706 /*
707 * We can't allow the emulated instruction to leave any of the cause
708 * bits set in FCSR. If they were then the kernel would take an FP
709 * exception when restoring FP context.
710 */
711 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
712
713 if (sig == SIGSEGV || sig == SIGBUS) {
714 struct siginfo si = {0};
715 si.si_addr = fault_addr;
716 si.si_signo = sig;
717 if (sig == SIGSEGV) {
718 if (find_vma(current->mm, (unsigned long)fault_addr))
719 si.si_code = SEGV_ACCERR;
720 else
721 si.si_code = SEGV_MAPERR;
722 } else {
723 si.si_code = BUS_ADRERR;
724 }
725 force_sig_info(sig, &si, current);
726 return 1;
727 } else if (sig) {
728 force_sig(sig, current);
729 return 1;
730 } else {
731 return 0;
732 }
733 }
734
735 /*
736 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
737 */
738 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
739 {
740 enum ctx_state prev_state;
741 siginfo_t info = {0};
742
743 prev_state = exception_enter();
744 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
745 == NOTIFY_STOP)
746 goto out;
747 die_if_kernel("FP exception in kernel code", regs);
748
749 if (fcr31 & FPU_CSR_UNI_X) {
750 int sig;
751 void __user *fault_addr = NULL;
752
753 if (!used_math())
754 init_fpu();
755 /*
756 * Unimplemented operation exception. If we've got the full
757 * software emulator on-board, let's use it...
758 *
759 * Force FPU to dump state into task/thread context. We're
760 * moving a lot of data here for what is probably a single
761 * instruction, but the alternative is to pre-decode the FP
762 * register operands before invoking the emulator, which seems
763 * a bit extreme for what should be an infrequent event.
764 */
765 /* Ensure 'resume' not overwrite saved fp context again. */
766 lose_fpu(1);
767
768 /* Run the emulator */
769 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
770 &fault_addr);
771
772 /* If something went wrong, signal */
773 process_fpemu_return(sig, fault_addr);
774
775 /* Restore the hardware register state */
776 own_fpu(1); /* Using the FPU again. */
777
778 goto out;
779 } else if (fcr31 & FPU_CSR_INV_X)
780 info.si_code = FPE_FLTINV;
781 else if (fcr31 & FPU_CSR_DIV_X)
782 info.si_code = FPE_FLTDIV;
783 else if (fcr31 & FPU_CSR_OVF_X)
784 info.si_code = FPE_FLTOVF;
785 else if (fcr31 & FPU_CSR_UDF_X)
786 info.si_code = FPE_FLTUND;
787 else if (fcr31 & FPU_CSR_INE_X)
788 info.si_code = FPE_FLTRES;
789 else
790 info.si_code = __SI_FAULT;
791 info.si_signo = SIGFPE;
792 info.si_errno = 0;
793 info.si_addr = (void __user *) regs->cp0_epc;
794 force_sig_info(SIGFPE, &info, current);
795
796 out:
797 exception_exit(prev_state);
798 }
799
800 void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
801 const char *str)
802 {
803 siginfo_t info;
804 char b[40];
805
806 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
807 if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
808 return;
809 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
810
811 if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
812 return;
813
814 /*
815 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
816 * insns, even for trap and break codes that indicate arithmetic
817 * failures. Weird ...
818 * But should we continue the brokenness??? --macro
819 */
820 switch (code) {
821 case BRK_OVERFLOW:
822 case BRK_DIVZERO:
823 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
824 die_if_kernel(b, regs);
825 if (code == BRK_DIVZERO)
826 info.si_code = FPE_INTDIV;
827 else
828 info.si_code = FPE_INTOVF;
829 info.si_signo = SIGFPE;
830 info.si_errno = 0;
831 info.si_addr = (void __user *) regs->cp0_epc;
832 force_sig_info(SIGFPE, &info, current);
833 break;
834 case BRK_BUG:
835 die_if_kernel("Kernel bug detected", regs);
836 force_sig(SIGTRAP, current);
837 break;
838 case BRK_MEMU:
839 /*
840 * Address errors may be deliberately induced by the FPU
841 * emulator to retake control of the CPU after executing the
842 * instruction in the delay slot of an emulated branch.
843 *
844 * Terminate if exception was recognized as a delay slot return
845 * otherwise handle as normal.
846 */
847 if (do_dsemulret(regs))
848 return;
849
850 die_if_kernel("Math emu break/trap", regs);
851 force_sig(SIGTRAP, current);
852 break;
853 default:
854 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
855 die_if_kernel(b, regs);
856 force_sig(SIGTRAP, current);
857 }
858 }
859
860 asmlinkage void do_bp(struct pt_regs *regs)
861 {
862 unsigned int opcode, bcode;
863 enum ctx_state prev_state;
864 unsigned long epc;
865 u16 instr[2];
866 #ifdef CONFIG_EVA
867 mm_segment_t seg;
868
869 seg = get_fs();
870 if (!user_mode(regs))
871 set_fs(KERNEL_DS);
872 #endif
873
874 prev_state = exception_enter();
875 if (get_isa16_mode(regs->cp0_epc)) {
876 /* Calculate EPC. */
877 epc = exception_epc(regs);
878 if (cpu_has_mmips) {
879 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
880 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
881 goto out_sigsegv;
882 opcode = (instr[0] << 16) | instr[1];
883 } else {
884 /* MIPS16e mode */
885 if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
886 goto out_sigsegv;
887 bcode = (instr[0] >> 6) & 0x3f;
888 do_trap_or_bp(regs, bcode, "Break");
889 #ifdef CONFIG_EVA
890 set_fs(seg);
891 #endif
892 goto out;
893 }
894 } else {
895 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
896 goto out_sigsegv;
897 }
898
899 /*
900 * There is an ancient bug in the MIPS assemblers that makes the break
901 * code start at bit 16 instead of bit 6 in the opcode.
902 * Gas is bug-compatible, but not always, grrr...
903 * We handle both cases with a simple heuristic. --macro
904 */
905 bcode = ((opcode >> 6) & ((1 << 20) - 1));
906 if (bcode >= (1 << 10))
907 bcode >>= 10;
908
909 /*
910 * notify the kprobe handlers, if instruction is likely to
911 * pertain to them.
912 */
913 switch (bcode) {
914 case BRK_KPROBE_BP:
915 if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) {
916 #ifdef CONFIG_EVA
917 set_fs(seg);
918 #endif
919 goto out;
920 } else
921 break;
922 case BRK_KPROBE_SSTEPBP:
923 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) {
924 #ifdef CONFIG_EVA
925 set_fs(seg);
926 #endif
927 goto out;
928 } else
929 break;
930 default:
931 break;
932 }
933
934 do_trap_or_bp(regs, bcode, "Break");
935 #ifdef CONFIG_EVA
936 set_fs(seg);
937 #endif
938 out:
939 exception_exit(prev_state);
940 return;
941
942 out_sigsegv:
943 #ifdef CONFIG_EVA
944 set_fs(seg);
945 #endif
946 force_sig(SIGSEGV, current);
947 goto out;
948 }
949
950 asmlinkage void do_tr(struct pt_regs *regs)
951 {
952 u32 opcode, tcode = 0;
953 enum ctx_state prev_state;
954 u16 instr[2];
955 unsigned long epc = msk_isa16_mode(exception_epc(regs));
956 #ifdef CONFIG_EVA
957 mm_segment_t seg;
958
959 seg = get_fs();
960 if (!user_mode(regs))
961 set_fs(KERNEL_DS);
962 #endif
963
964 prev_state = exception_enter();
965 if (get_isa16_mode(regs->cp0_epc)) {
966 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
967 __get_user(instr[1], (u16 __user *)(epc + 2)))
968 goto out_sigsegv;
969 opcode = (instr[0] << 16) | instr[1];
970 /* Immediate versions don't provide a code. */
971 if (!(opcode & OPCODE))
972 tcode = (opcode >> 12) & ((1 << 4) - 1);
973 } else {
974 if (__get_user(opcode, (u32 __user *)epc))
975 goto out_sigsegv;
976 /* Immediate versions don't provide a code. */
977 if (!(opcode & OPCODE))
978 tcode = (opcode >> 6) & ((1 << 10) - 1);
979 }
980
981 do_trap_or_bp(regs, tcode, "Trap");
982 #ifdef CONFIG_EVA
983 set_fs(seg);
984 #endif
985 out:
986 exception_exit(prev_state);
987 return;
988
989 out_sigsegv:
990 #ifdef CONFIG_EVA
991 set_fs(seg);
992 #endif
993 force_sig(SIGSEGV, current);
994 goto out;
995 }
996
997 asmlinkage void do_ri(struct pt_regs *regs)
998 {
999 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1000 unsigned long old_epc = regs->cp0_epc;
1001 unsigned long old31 = regs->regs[31];
1002 enum ctx_state prev_state;
1003 unsigned int opcode = 0;
1004 int status = -1;
1005
1006 #ifdef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
1007 if (mipsr2_emulation && likely(user_mode(regs))) {
1008 if (likely(get_user(opcode, epc) >= 0)) {
1009 status = mipsr2_decoder(regs, opcode);
1010 switch (status) {
1011 case SIGEMT:
1012 case 0:
1013 return;
1014 case SIGILL:
1015 break;
1016 default:
1017 process_fpemu_return(status, (void __user *)current->thread.cp0_baduaddr);
1018 return;
1019 }
1020 }
1021 }
1022 #endif
1023
1024 prev_state = exception_enter();
1025 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
1026 == NOTIFY_STOP)
1027 goto out;
1028
1029 die_if_kernel("Reserved instruction in kernel code", regs);
1030
1031 if (unlikely(compute_return_epc(regs) < 0))
1032 goto out;
1033
1034 if (get_isa16_mode(regs->cp0_epc)) {
1035 unsigned short mmop[2] = { 0 };
1036
1037 if (unlikely(get_user(mmop[0], epc) < 0))
1038 status = SIGSEGV;
1039 if (unlikely(get_user(mmop[1], epc) < 0))
1040 status = SIGSEGV;
1041 opcode = (mmop[0] << 16) | mmop[1];
1042
1043 if (status < 0)
1044 status = simulate_rdhwr_mm(regs, opcode);
1045 } else {
1046 if (unlikely(get_user(opcode, epc) < 0))
1047 status = SIGSEGV;
1048
1049 if (!cpu_has_llsc && status < 0)
1050 status = simulate_llsc(regs, opcode);
1051
1052 if (status < 0)
1053 status = simulate_rdhwr_normal(regs, opcode);
1054
1055 if (status < 0)
1056 status = simulate_sync(regs, opcode);
1057 }
1058
1059 if (status < 0)
1060 status = SIGILL;
1061
1062 if (unlikely(status > 0)) {
1063 regs->cp0_epc = old_epc; /* Undo skip-over. */
1064 regs->regs[31] = old31;
1065 force_sig(status, current);
1066 }
1067
1068 out:
1069 exception_exit(prev_state);
1070 }
1071
1072 /*
1073 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1074 * emulated more than some threshold number of instructions, force migration to
1075 * a "CPU" that has FP support.
1076 */
1077 static void mt_ase_fp_affinity(void)
1078 {
1079 #ifdef CONFIG_MIPS_MT_FPAFF
1080 if (mt_fpemul_threshold > 0 &&
1081 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1082 /*
1083 * If there's no FPU present, or if the application has already
1084 * restricted the allowed set to exclude any CPUs with FPUs,
1085 * we'll skip the procedure.
1086 */
1087 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
1088 cpumask_t tmask;
1089
1090 current->thread.user_cpus_allowed
1091 = current->cpus_allowed;
1092 cpus_and(tmask, current->cpus_allowed,
1093 mt_fpu_cpumask);
1094 set_cpus_allowed_ptr(current, &tmask);
1095 set_thread_flag(TIF_FPUBOUND);
1096 }
1097 }
1098 #endif /* CONFIG_MIPS_MT_FPAFF */
1099 }
1100
1101 /*
1102 * No lock; only written during early bootup by CPU 0.
1103 */
1104 static RAW_NOTIFIER_HEAD(cu2_chain);
1105
1106 int __ref register_cu2_notifier(struct notifier_block *nb)
1107 {
1108 return raw_notifier_chain_register(&cu2_chain, nb);
1109 }
1110
1111 int cu2_notifier_call_chain(unsigned long val, void *v)
1112 {
1113 return raw_notifier_call_chain(&cu2_chain, val, v);
1114 }
1115
1116 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1117 void *data)
1118 {
1119 struct pt_regs *regs = data;
1120
1121 switch (action) {
1122 default:
1123 die_if_kernel("Unhandled kernel unaligned access or invalid "
1124 "instruction", regs);
1125 /* Fall through */
1126
1127 case CU2_EXCEPTION:
1128 force_sig(SIGILL, current);
1129 }
1130
1131 return NOTIFY_OK;
1132 }
1133
1134 static int enable_restore_fp_context(int msa)
1135 {
1136 int err, was_fpu_owner, prior_msa;
1137
1138 if (!used_math()) {
1139 /* First time FP context user. */
1140 if (msa && !raw_cpu_has_fpu) {
1141 force_sig(SIGFPE, current);
1142 return(-1);
1143 }
1144 preempt_disable();
1145 err = init_fpu();
1146 if (msa && !err) {
1147 enable_msa();
1148 _init_msa_upper();
1149 set_thread_flag(TIF_USEDMSA);
1150 set_thread_flag(TIF_MSA_CTX_LIVE);
1151 }
1152 preempt_enable();
1153 if (err && raw_cpu_has_fpu)
1154 #ifdef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
1155 if (!mipsr2_emulation)
1156 #endif
1157 {
1158 force_sig(SIGFPE, current);
1159 return(-1);
1160 }
1161 return err;
1162 }
1163
1164 /*
1165 * This task has formerly used the FP context.
1166 *
1167 * If this thread has no live MSA vector context then we can simply
1168 * restore the scalar FP context. If it has live MSA vector context
1169 * (that is, it has or may have used MSA since last performing a
1170 * function call) then we'll need to restore the vector context. This
1171 * applies even if we're currently only executing a scalar FP
1172 * instruction. This is because if we were to later execute an MSA
1173 * instruction then we'd either have to:
1174 *
1175 * - Restore the vector context & clobber any registers modified by
1176 * scalar FP instructions between now & then.
1177 *
1178 * or
1179 *
1180 * - Not restore the vector context & lose the most significant bits
1181 * of all vector registers.
1182 *
1183 * Neither of those options is acceptable. We cannot restore the least
1184 * significant bits of the registers now & only restore the most
1185 * significant bits later because the most significant bits of any
1186 * vector registers whose aliased FP register is modified now will have
1187 * been zeroed. We'd have no way to know that when restoring the vector
1188 * context & thus may load an outdated value for the most significant
1189 * bits of a vector register.
1190 *
1191 * Note LY22: It is possible to restore a partial MSA context via "insert".
1192 * This can be used to restore the upper parts of the MSA registers,
1193 * but those upper parts must exist and have been saved beforehand.
1194 */
1195 if (!msa && !thread_msa_context_live())
1196 return own_fpu(1);
1197
1198 if (!raw_cpu_has_fpu) {
1199 force_sig(SIGFPE, current);
1200 return(-1);
1201 }
1202 /*
1203 * This task is using or has previously used MSA. Thus we require
1204 * that Status.FR == 1.
1205 */
1206 preempt_disable();
1207 was_fpu_owner = is_fpu_owner();
1208 err = own_fpu_inatomic(0);
1209 if (err)
1210 goto out;
1211
1212 enable_msa();
1213 write_msa_csr(current->thread.fpu.msacsr);
1214 set_thread_flag(TIF_USEDMSA);
1215
1216 /*
1217 * If this is the first time that the task is using MSA and it has
1218 * previously used scalar FP in this time slice then we already have
1219 * FP context which we shouldn't clobber. We do however need to clear
1220 * the upper 64b of each vector register so that this task has no
1221 * opportunity to see data left behind by another.
1222 */
1223 prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1224 if (!prior_msa && was_fpu_owner) {
1225 _init_msa_upper();
1226
1227 goto out;
1228 }
1229
1230 if (!prior_msa) {
1231 /*
1232 * Restore the least significant 64b of each vector register
1233 * from the existing scalar FP context.
1234 */
1235 _restore_fp(current);
1236
1237 /*
1238 * The task has not formerly used MSA, so clear the upper 64b
1239 * of each vector register such that it cannot see data left
1240 * behind by another task.
1241 */
1242 _init_msa_upper();
1243 } else {
1244 /* prior_msa: We need to restore the vector context. */
1245 if (!was_fpu_owner) {
1246 restore_msa(current);
1247 write_32bit_cp1_register(CP1_STATUS,current->thread.fpu.fcr31);
1248 } else {
1249 _restore_msa_uppers_from_thread(&current->thread.fpu.fpr[0]);
1250 }
1251 }
1252
1253 out:
1254 preempt_enable();
1255
1256 return 0;
1257 }
1258
1259 asmlinkage void do_cpu(struct pt_regs *regs)
1260 {
1261 enum ctx_state prev_state;
1262 unsigned int __user *epc;
1263 unsigned long old_epc, old31;
1264 unsigned int opcode;
1265 unsigned int cpid;
1266 int status;
1267 unsigned long __maybe_unused flags;
1268
1269 prev_state = exception_enter();
1270 die_if_kernel("do_cpu invoked from kernel context!", regs);
1271
1272 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1273
1274 switch (cpid) {
1275 case 0:
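/*
 * Coprocessor 0 unusable from user mode: the only cases worth emulating
 * are rdhwr (and ll/sc on ll/sc-less CPUs); everything else gets SIGILL.
 */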
1276 epc = (unsigned int __user *)exception_epc(regs);
1277 old_epc = regs->cp0_epc;
1278 old31 = regs->regs[31];
1279 opcode = 0;
1280 status = -1;
1281
1282 if (unlikely(compute_return_epc(regs) < 0))
1283 goto out;
1284
1285 if (get_isa16_mode(regs->cp0_epc)) {
1286 unsigned short mmop[2] = { 0 };
1287
1288 if (unlikely(get_user(mmop[0], epc) < 0))
1289 status = SIGSEGV;
1290 if (unlikely(get_user(mmop[1], epc) < 0))
1291 status = SIGSEGV;
1292 opcode = (mmop[0] << 16) | mmop[1];
1293
1294 if (status < 0)
1295 status = simulate_rdhwr_mm(regs, opcode);
1296 } else {
1297 if (unlikely(get_user(opcode, epc) < 0))
1298 status = SIGSEGV;
1299
1300 if (!cpu_has_llsc && status < 0)
1301 status = simulate_llsc(regs, opcode);
1302
1303 if (status < 0)
1304 status = simulate_rdhwr_normal(regs, opcode);
1305 }
1306
1307 if (status < 0)
1308 status = SIGILL;
1309
1310 if (unlikely(status > 0)) {
1311 regs->cp0_epc = old_epc; /* Undo skip-over. */
1312 regs->regs[31] = old31;
1313 force_sig(status, current);
1314 }
1315
1316 goto out;
1317
1318 case 3:
1319 /*
1320 * Old (MIPS I and MIPS II) processors will set this code
1321 * for COP1X opcode instructions that replaced the original
1322 * COP3 space. We don't limit COP1 space instructions in
1323 * the emulator according to the CPU ISA, so we want to
1324 * treat COP1X instructions consistently regardless of which
1325 * code the CPU chose. Therefore we redirect this trap to
1326 * the FP emulator too.
1327 *
1328 * Then some newer FPU-less processors use this code
1329 * erroneously too, so they are covered by this choice
1330 * as well.
1331 */
1332 if (raw_cpu_has_fpu)
1333 break;
1334 /* Fall through. */
1335
1336 case 1:
1337 status = enable_restore_fp_context(0);
1338 if (status < 0)
1339 return;
1340
1341 if ((!raw_cpu_has_fpu) || status) {
1342 int sig;
1343 void __user *fault_addr = NULL;
1344 sig = fpu_emulator_cop1Handler(regs,
1345 &current->thread.fpu,
1346 0, &fault_addr);
1347 if ((!process_fpemu_return(sig, fault_addr)) && !status)
1348 mt_ase_fp_affinity();
1349 }
1350
1351 goto out;
1352
1353 case 2:
1354 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1355 goto out;
1356 }
1357
1358 force_sig(SIGILL, current);
1359
1360 out:
1361 exception_exit(prev_state);
1362 }
1363
1364 asmlinkage void do_msa_fpe(struct pt_regs *regs)
1365 {
1366 die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1367 force_sig(SIGFPE, current);
1368 }
1369
1370 asmlinkage void do_msa(struct pt_regs *regs)
1371 {
1372 int err;
1373
1374 if ((!cpu_has_msa) || !test_thread_local_flags(LTIF_FPU_FR)) {
1375 force_sig(SIGILL, current);
1376 goto out;
1377 }
1378
1379 die_if_kernel("do_msa invoked from kernel context!", regs);
1380
1381 err = enable_restore_fp_context(1);
1382 if (err)
1383 force_sig(SIGILL, current);
1384 out:
1385 ;
1386 }
1387
1388 asmlinkage void do_mdmx(struct pt_regs *regs)
1389 {
1390 enum ctx_state prev_state;
1391
1392 prev_state = exception_enter();
1393 force_sig(SIGILL, current);
1394 exception_exit(prev_state);
1395 }
1396
1397 /*
1398 * Called with interrupts disabled.
1399 */
1400 asmlinkage void do_watch(struct pt_regs *regs)
1401 {
1402 enum ctx_state prev_state;
1403 u32 cause;
1404
1405 prev_state = exception_enter();
1406 /*
1407 * Clear WP (bit 22) bit of cause register so we don't loop
1408 * forever.
1409 */
1410 cause = read_c0_cause();
1411 cause &= ~(1 << 22);
1412 write_c0_cause(cause);
1413
1414 /*
1415 * If the current thread has the watch registers loaded, save
1416 * their values and send SIGTRAP. Otherwise another thread
1417 * left the registers set, clear them and continue.
1418 */
1419 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1420 mips_read_watch_registers();
1421 local_irq_enable();
1422 force_sig(SIGTRAP, current);
1423 } else {
1424 mips_clear_watch_registers();
1425 local_irq_enable();
1426 }
1427 exception_exit(prev_state);
1428 }
1429
1430 #ifdef CONFIG_CPU_MIPSR6
1431 char *mcheck_code[32] = { "non R6 multiple hit in TLB: Status.TS = 1",
1432 "multiple hit in TLB",
1433 "multiple hit in TLB, speculative access",
1434 "page size mismatch, unsupported FTLB page mask",
1435 "index doesn't match EntryHI.VPN2 position in FTLB",
1436 "HW PageTableWalker: Valid bits mismatch in PTE pair on directory level",
1437 "HW PageTableWalker: Dual page mode is not implemented"
1438 };
1439 #endif
1440
1441 asmlinkage void do_mcheck(struct pt_regs *regs)
1442 {
1443 const int field = 2 * sizeof(unsigned long);
1444 int multi_match = regs->cp0_status & ST0_TS;
1445 enum ctx_state prev_state;
1446 #ifdef CONFIG_CPU_MIPSR6
1447 int code = 0;
1448 #endif
1449
1450 prev_state = exception_enter();
1451 show_regs(regs);
1452
1453 #ifdef CONFIG_CPU_MIPSR6
1454 if (multi_match || (code = read_c0_pagegrain() & PG_MCCAUSE)) {
1455 printk("PageGrain: %0x\n", read_c0_pagegrain());
1456 printk("BadVAddr: %0*lx\n", field, read_c0_badvaddr());
1457 #else
1458 if (multi_match) {
1459 #endif
1460 printk("Index : %0x\n", read_c0_index());
1461 printk("Pagemask: %0x\n", read_c0_pagemask());
1462 printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
1463 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
1464 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
1465 printk("\n");
1466 dump_tlb_all();
1467 }
1468
1469 show_code((unsigned int __user *) regs->cp0_epc);
1470
1471 #ifdef CONFIG_CPU_MIPSR6
1472 panic("Caught Machine Check exception - %s",mcheck_code[code]);
1473 #else
1474 /*
1475 * Some chips may have other causes of machine check (e.g. SB1
1476 * graduation timer)
1477 */
1478 panic("Caught Machine Check exception - %scaused by multiple "
1479 "matching entries in the TLB.",
1480 (multi_match) ? "" : "not ");
1481 exception_exit(prev_state);
1482 #endif
1483 }
1484
1485 asmlinkage void do_mt(struct pt_regs *regs)
1486 {
1487 int subcode;
1488
1489 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1490 >> VPECONTROL_EXCPT_SHIFT;
1491 switch (subcode) {
1492 case 0:
1493 printk(KERN_DEBUG "Thread Underflow\n");
1494 break;
1495 case 1:
1496 printk(KERN_DEBUG "Thread Overflow\n");
1497 break;
1498 case 2:
1499 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1500 break;
1501 case 3:
1502 printk(KERN_DEBUG "Gating Storage Exception\n");
1503 break;
1504 case 4:
1505 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1506 break;
1507 case 5:
1508 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1509 break;
1510 default:
1511 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1512 subcode);
1513 break;
1514 }
1515 die_if_kernel("MIPS MT Thread exception in kernel", regs);
1516
1517 force_sig(SIGILL, current);
1518 }
1519
1520
1521 asmlinkage void do_dsp(struct pt_regs *regs)
1522 {
1523 if (cpu_has_dsp)
1524 panic("Unexpected DSP exception");
1525
1526 force_sig(SIGILL, current);
1527 }
1528
1529 asmlinkage void do_reserved(struct pt_regs *regs)
1530 {
1531 /*
1532 * Game over - no way to handle this if it ever occurs. Most probably
1533 * caused by a new unknown cpu type or after another deadly
1534 * hard/software error.
1535 */
1536 show_regs(regs);
1537 panic("Caught reserved exception %ld - should not happen.",
1538 (regs->cp0_cause & 0x7f) >> 2);
1539 }
1540
1541 static int __initdata l1parity = 1;
1542 static int __init nol1parity(char *s)
1543 {
1544 l1parity = 0;
1545 return 1;
1546 }
1547 __setup("nol1par", nol1parity);
1548 static int __initdata l2parity = 1;
1549 static int __init nol2parity(char *s)
1550 {
1551 l2parity = 0;
1552 return 1;
1553 }
1554 __setup("nol2par", nol2parity);
1555
1556 /*
1557 * Some MIPS CPUs can enable/disable cache parity detection, but they
1558 * do it in different ways.
1559 */
1560 static inline void parity_protection_init(void)
1561 {
1562 switch (current_cpu_type()) {
1563 case CPU_24K:
1564 case CPU_34K:
1565 case CPU_74K:
1566 case CPU_1004K:
1567 case CPU_PROAPTIV:
1568 case CPU_INTERAPTIV:
1569 case CPU_SAMURAI:
1570 {
1571 #define ERRCTL_PE 0x80000000
1572 #define ERRCTL_L2P 0x00800000
1573 unsigned long errctl;
1574 unsigned int l1parity_present, l2parity_present;
1575
1576 errctl = read_c0_ecc();
1577 errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1578
1579 /* probe L1 parity support */
1580 write_c0_ecc(errctl | ERRCTL_PE);
1581 back_to_back_c0_hazard();
1582 l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1583
1584 /* probe L2 parity support */
1585 write_c0_ecc(errctl|ERRCTL_L2P);
1586 back_to_back_c0_hazard();
1587 l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1588
1589 if (l1parity_present && l2parity_present) {
1590 if (l1parity)
1591 errctl |= ERRCTL_PE;
1592 if (l1parity ^ l2parity)
1593 errctl |= ERRCTL_L2P;
1594 } else if (l1parity_present) {
1595 if (l1parity)
1596 errctl |= ERRCTL_PE;
1597 } else if (l2parity_present) {
1598 if (l2parity)
1599 errctl |= ERRCTL_L2P;
1600 } else {
1601 /* No parity available */
1602 }
1603
1604 printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1605
1606 write_c0_ecc(errctl);
1607 back_to_back_c0_hazard();
1608 errctl = read_c0_ecc();
1609 printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1610
1611 if (l1parity_present)
1612 printk(KERN_INFO "Cache parity protection %sabled\n",
1613 (errctl & ERRCTL_PE) ? "en" : "dis");
1614
1615 if (l2parity_present) {
1616 if (l1parity_present && l1parity)
1617 errctl ^= ERRCTL_L2P;
1618 printk(KERN_INFO "L2 cache parity protection %sabled\n",
1619 (errctl & ERRCTL_L2P) ? "en" : "dis");
1620 }
1621 }
1622 break;
1623
1624 case CPU_5KC:
1625 case CPU_5KE:
1626 case CPU_LOONGSON1:
1627 write_c0_ecc(0x80000000);
1628 back_to_back_c0_hazard();
1629 /* Set the PE bit (bit 31) in the c0_errctl register. */
1630 printk(KERN_INFO "Cache parity protection %sabled\n",
1631 (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1632 break;
1633 case CPU_20KC:
1634 case CPU_25KF:
1635 /* Clear the DE bit (bit 16) in the c0_status register. */
1636 printk(KERN_INFO "Enable cache parity protection for "
1637 "MIPS 20KC/25KF CPUs.\n");
1638 clear_c0_status(ST0_DE);
1639 break;
1640 default:
1641 break;
1642 }
1643 }
1644
1645 asmlinkage void cache_parity_error(void)
1646 {
1647 const int field = 2 * sizeof(unsigned long);
1648 unsigned int reg_val;
1649
1650 /* For the moment, report the problem and hang. */
1651 printk("Cache error exception, cp0_ecc=0x%08x:\n",read_c0_ecc());
1652 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1653 reg_val = read_c0_cacheerr();
1654 printk("c0_cacheerr == %08x\n", reg_val);
1655
1656 if ((reg_val & 0xc0000000) == 0xc0000000)
1657 printk("Decoded c0_cacheerr: FTLB parity error\n");
1658 else
1659 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1660 reg_val & (1<<30) ? "secondary" : "primary",
1661 reg_val & (1<<31) ? "data" : "insn");
1662 printk("Error bits: %s%s%s%s%s%s%s%s\n",
1663 reg_val & (1<<29) ? "ED " : "",
1664 reg_val & (1<<28) ? "ET " : "",
1665 reg_val & (1<<27) ? "ES " : "",
1666 reg_val & (1<<26) ? "EE " : "",
1667 reg_val & (1<<25) ? "EB " : "",
1668 reg_val & (1<<24) ? "EI/EF " : "",
1669 reg_val & (1<<23) ? "E1/SP " : "",
1670 reg_val & (1<<22) ? "E0/EW " : "");
1671 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1672
1673 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1674 if (reg_val & (1<<22))
1675 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1676
1677 if (reg_val & (1<<23))
1678 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1679 #endif
1680
1681 panic("Can't handle the cache error!");
1682 }
1683
1684 asmlinkage void do_ftlb(void)
1685 {
1686 const int field = 2 * sizeof(unsigned long);
1687 unsigned int reg_val;
1688
1689 /* For the moment, report the problem and hang. */
1690 printk("FTLB error exception, cp0_ecc=0x%08x:\n",read_c0_ecc());
1691 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1692 reg_val = read_c0_cacheerr();
1693 printk("c0_cacheerr == %08x\n", reg_val);
1694
1695 if ((reg_val & 0xc0000000) == 0xc0000000)
1696 printk("Decoded c0_cacheerr: FTLB parity error\n");
1697 else
1698 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1699 reg_val & (1<<30) ? "secondary" : "primary",
1700 reg_val & (1<<31) ? "data" : "insn");
1701 printk("Error bits: %s%s%s%s%s%s%s%s\n",
1702 reg_val & (1<<29) ? "ED " : "",
1703 reg_val & (1<<28) ? "ET " : "",
1704 reg_val & (1<<27) ? "ES " : "",
1705 reg_val & (1<<26) ? "EE " : "",
1706 reg_val & (1<<25) ? "EB " : "",
1707 reg_val & (1<<24) ? "EI/EF " : "",
1708 reg_val & (1<<23) ? "E1/SP " : "",
1709 reg_val & (1<<22) ? "E0/EW " : "");
1710 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1711
1712 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1713 if (reg_val & (1<<22))
1714 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1715
1716 if (reg_val & (1<<23))
1717 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1718 #endif
1719
1720 panic("Can't handle the FTLB parity error!");
1721 }
1722
1723 /*
1724 * SDBBP EJTAG debug exception handler.
1725 * We skip the instruction and return to the next instruction.
1726 */
1727 void ejtag_exception_handler(struct pt_regs *regs)
1728 {
1729 const int field = 2 * sizeof(unsigned long);
1730 unsigned long depc, old_epc, old_ra;
1731 unsigned int debug;
1732
1733 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1734 depc = read_c0_depc();
1735 debug = read_c0_debug();
1736 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1737 if (debug & 0x80000000) {
1738 /*
1739 * In branch delay slot.
1740 * We cheat a little bit here and use EPC to calculate the
1741 * debug return address (DEPC). EPC is restored after the
1742 * calculation.
1743 */
1744 old_epc = regs->cp0_epc;
1745 old_ra = regs->regs[31];
1746 regs->cp0_epc = depc;
1747 compute_return_epc(regs);
1748 depc = regs->cp0_epc;
1749 regs->cp0_epc = old_epc;
1750 regs->regs[31] = old_ra;
1751 } else
1752 depc += 4;
1753 write_c0_depc(depc);
1754
1755 #if 0
1756 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1757 write_c0_debug(debug | 0x100);
1758 #endif
1759 }
1760
1761 /*
1762 * NMI exception handler.
1763 * No lock; only written during early bootup by CPU 0.
1764 */
1765 static RAW_NOTIFIER_HEAD(nmi_chain);
1766
1767 int register_nmi_notifier(struct notifier_block *nb)
1768 {
1769 return raw_notifier_chain_register(&nmi_chain, nb);
1770 }
1771
1772 void __noreturn nmi_exception_handler(struct pt_regs *regs)
1773 {
1774 unsigned long epc;
1775 char str[100];
1776
1777 raw_notifier_call_chain(&nmi_chain, 0, regs);
1778 bust_spinlocks(1);
1779 epc = regs->cp0_epc;
1780 snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx (before replacement by CP0_ERROREPC)\n",smp_processor_id(),regs->cp0_epc);
1781 regs->cp0_epc = read_c0_errorepc();
1782 die(str, regs);
1783 regs->cp0_epc = epc;
1784 }
1785
1786 #define VECTORSPACING 0x100 /* for EI/VI mode */
1787
1788 unsigned long ebase;
1789 unsigned long exception_handlers[32];
1790 unsigned long vi_handlers[64];
1791
1792 void __init *set_except_vector(int n, void *addr)
1793 {
1794 unsigned long handler = (unsigned long) addr;
1795 unsigned long old_handler;
1796
1797 #ifdef CONFIG_CPU_MICROMIPS
1798 /*
1799 * Only the TLB handlers are cache aligned with an even
1800 * address. All other handlers are on an odd address and
1801 * require no modification. Otherwise, MIPS32 mode will
1802 * be entered when handling any TLB exceptions. That
1803 * would be bad...since we must stay in microMIPS mode.
1804 */
1805 if (!(handler & 0x1))
1806 handler |= 1;
1807 #endif
1808 old_handler = xchg(&exception_handlers[n], handler);
1809
1810 if (n == 0 && cpu_has_divec) {
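/*
 * With Cause.IV set, interrupts use the dedicated vector at
 * ebase + 0x200, so patch in a jump to the new handler there:
 * a short 'j' if it is in reach, otherwise load the address and 'jr'.
 */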
1811 #ifdef CONFIG_CPU_MICROMIPS
1812 unsigned long jump_mask = ~((1 << 27) - 1);
1813 #else
1814 unsigned long jump_mask = ~((1 << 28) - 1);
1815 #endif
1816 u32 *buf = (u32 *)(ebase + 0x200);
1817 unsigned int k0 = 26;
1818 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1819 uasm_i_j(&buf, handler & ~jump_mask);
1820 uasm_i_nop(&buf);
1821 } else {
1822 UASM_i_LA(&buf, k0, handler);
1823 uasm_i_jr(&buf, k0);
1824 uasm_i_nop(&buf);
1825 }
1826 local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1827 }
1828 return (void *)old_handler;
1829 }
1830
1831 static void do_default_vi(void)
1832 {
1833 show_regs(get_irq_regs());
1834 panic("Caught unexpected vectored interrupt.");
1835 }
1836
1837 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1838 {
1839 unsigned long handler;
1840 unsigned long old_handler = vi_handlers[n];
1841 int srssets = current_cpu_data.srsets;
1842 u16 *h;
1843 unsigned char *b;
1844
1845 BUG_ON(!cpu_has_veic && !cpu_has_vint);
1846
1847 if (addr == NULL) {
1848 handler = (unsigned long) do_default_vi;
1849 srs = 0;
1850 } else
1851 handler = (unsigned long) addr;
1852 vi_handlers[n] = handler;
1853
1854 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1855
1856 if (srs >= srssets)
1857 panic("Shadow register set %d not supported", srs);
1858
1859 if (cpu_has_veic) {
1860 if (board_bind_eic_interrupt)
1861 board_bind_eic_interrupt(n, srs);
1862 } else if (cpu_has_vint) {
1863 /* SRSMap is only defined if shadow sets are implemented */
1864 if (srssets > 1)
1865 change_c0_srsmap(0xf << n*4, srs << n*4);
1866 }
1867
1868 if (srs == 0) {
1869 /*
1870 * If no shadow set is selected then use the default handler
1871 * that does normal register saving and standard interrupt exit
1872 */
1873 extern char except_vec_vi, except_vec_vi_lui;
1874 extern char except_vec_vi_ori, except_vec_vi_end;
1875 extern char rollback_except_vec_vi;
1876 char *vec_start = using_rollback_handler() ?
1877 &rollback_except_vec_vi : &except_vec_vi;
1878 #ifdef CONFIG_MIPS_MT_SMTC
1879 /*
1880 * We need to provide the SMTC vectored interrupt handler
1881 * not only with the address of the handler, but with the
1882 * Status.IM bit to be masked before going there.
1883 */
1884 extern char except_vec_vi_mori;
1885 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1886 const int mori_offset = &except_vec_vi_mori - vec_start + 2;
1887 #else
1888 const int mori_offset = &except_vec_vi_mori - vec_start;
1889 #endif
1890 #endif /* CONFIG_MIPS_MT_SMTC */
1891 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1892 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1893 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1894 #else
1895 const int lui_offset = &except_vec_vi_lui - vec_start;
1896 const int ori_offset = &except_vec_vi_ori - vec_start;
1897 #endif
1898 const int handler_len = &except_vec_vi_end - vec_start;
1899
1900 if (handler_len > VECTORSPACING) {
1901 /*
1902 * Sigh... panicking won't help as the console
1903 * is probably not configured :(
1904 */
1905 panic("VECTORSPACING too small");
1906 }
1907
1908 set_handler(((unsigned long)b - ebase), vec_start,
1909 #ifdef CONFIG_CPU_MICROMIPS
1910 (handler_len - 1));
1911 #else
1912 handler_len);
1913 #endif
1914 #ifdef CONFIG_MIPS_MT_SMTC
1915 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1916
1917 h = (u16 *)(b + mori_offset);
1918 *h = (0x100 << n);
1919 #endif /* CONFIG_MIPS_MT_SMTC */
1920 h = (u16 *)(b + lui_offset);
1921 *h = (handler >> 16) & 0xffff;
1922 h = (u16 *)(b + ori_offset);
1923 *h = (handler & 0xffff);
1924 local_flush_icache_range((unsigned long)b,
1925 (unsigned long)(b+handler_len));
1926 }
1927 else {
1928 /*
1929 * In other cases jump directly to the interrupt handler. It
1930 * is the handler's responsibility to save registers if required
1931 * (eg hi/lo) and return from the exception using "eret".
1932 */
1933 u32 insn;
1934
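/*
 * Build an absolute jump to the handler: the 26-bit target field holds
 * a word index for the MIPS32 'j' and a halfword index for the
 * microMIPS encoding, so the handler must be within jump range of the vector.
 */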
1935 h = (u16 *)b;
1936 /* j handler */
1937 #ifdef CONFIG_CPU_MICROMIPS
1938 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1939 #else
1940 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1941 #endif
1942 h[0] = (insn >> 16) & 0xffff;
1943 h[1] = insn & 0xffff;
1944 h[2] = 0;
1945 h[3] = 0;
1946 local_flush_icache_range((unsigned long)b,
1947 (unsigned long)(b+8));
1948 }
1949
1950 return (void *)old_handler;
1951 }
1952
1953 void *set_vi_handler(int n, vi_handler_t addr)
1954 {
1955 return set_vi_srs_handler(n, addr, 0);
1956 }
1957
1958 extern void tlb_init(void);
1959 extern void flush_tlb_handlers(void);
1960
1961 /*
1962 * Timer interrupt
1963 */
1964 int cp0_compare_irq;
1965 EXPORT_SYMBOL_GPL(cp0_compare_irq);
1966 int cp0_compare_irq_shift;
1967
1968 /*
1969 * Performance counter IRQ or -1 if shared with timer
1970 */
1971 int cp0_perfcount_irq;
1972 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1973
1974 static int __cpuinitdata noulri;
1975
1976 static int __init ulri_disable(char *s)
1977 {
1978 pr_info("Disabling ulri\n");
1979 noulri = 1;
1980
1981 return 1;
1982 }
1983 __setup("noulri", ulri_disable);
1984
1985 void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1986 {
1987 unsigned int cpu = smp_processor_id();
1988 unsigned int status_set = ST0_CU0;
1989 unsigned int hwrena = cpu_hwrena_impl_bits;
1990 #ifdef CONFIG_MIPS_MT_SMTC
1991 int secondaryTC = 0;
1992 int bootTC = (cpu == 0);
1993
1994 /*
1995 * Only do per_cpu_trap_init() for the first TC of each VPE.
1996 * Note that this hack assumes that the SMTC init code
1997 * assigns TCs consecutively and in ascending order.
1998 */
1999
2000 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
2001 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
2002 secondaryTC = 1;
2003 #endif /* CONFIG_MIPS_MT_SMTC */
2004
2005 /*
2006 * Disable coprocessors and select 32-bit or 64-bit addressing
2007 * and the 16/32 or 32/32 FPR register model. Reset the BEV
2008 * flag that some firmware may have left set and the TS bit (for
2009 * IP27). Set XX for ISA IV code to work.
2010 */
2011 #ifdef CONFIG_64BIT
2012 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2013 #endif
2014 if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2015 status_set |= ST0_XX;
2016 if (cpu_has_dsp)
2017 status_set |= ST0_MX;
2018
2019 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2020 status_set);
2021
2022 if (cpu_has_mips_r2 || cpu_has_mips_r6)
2023 hwrena |= 0x0000000f;
2024
2025 if (!noulri && cpu_has_userlocal)
2026 hwrena |= (1 << 29);
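/*
 * The bits set above: HWREna 3..0 expose CPUNum, SYNCI_Step, CC and
 * CCRes to user-mode RDHWR; bit 29 exposes the UserLocal register,
 * which user space (e.g. the C library) uses for fast TLS access.
 */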
2027
2028 if (hwrena)
2029 write_c0_hwrena(hwrena);
2030
2031 #ifdef CONFIG_MIPS_MT_SMTC
2032 if (!secondaryTC) {
2033 #endif /* CONFIG_MIPS_MT_SMTC */
2034
2035 if (cpu_has_veic || cpu_has_vint) {
2036 unsigned long sr = set_c0_status(ST0_BEV);
2037 #if defined(CONFIG_EVA) || defined(CONFIG_CPU_MIPS64_R6)
2038 write_c0_ebase(ebase|MIPS_EBASE_WG);
2039 back_to_back_c0_hazard();
2040 #endif
2041 write_c0_ebase(ebase);
2042 write_c0_status(sr);
2043 /* Setting vector spacing enables EI/VI mode */
2044 change_c0_intctl(0x3e0, VECTORSPACING);
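/*
 * 0x3e0 is the IntCtl.VS field (bits 9:5); a non-zero vector spacing
 * is what actually switches the core into vectored interrupt mode.
 */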
2045 }
2046 if (cpu_has_divec) {
2047 if (cpu_has_mipsmt) {
2048 unsigned int vpflags = dvpe();
2049 set_c0_cause(CAUSEF_IV);
2050 evpe(vpflags);
2051 } else
2052 set_c0_cause(CAUSEF_IV);
2053 }
2054
2055 /*
2056 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2057 *
2058 * o read IntCtl.IPTI to determine the timer interrupt
2059 * o read IntCtl.IPPCI to determine the performance counter interrupt
2060 */
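/*
 * On pre-R2 CPUs there is no IntCtl register and both sources share
 * the fixed CPU interrupt 7, hence the constants and the -1 (shared
 * with the timer) in the legacy branch below.
 */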
2061 if (cpu_has_mips_r2 || cpu_has_mips_r6) {
2062 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2063 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2064 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2065 if (cp0_perfcount_irq == cp0_compare_irq)
2066 cp0_perfcount_irq = -1;
2067 } else {
2068 cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2069 cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2070 cp0_perfcount_irq = -1;
2071 }
2072
2073 #ifdef CONFIG_MIPS_MT_SMTC
2074 }
2075 #endif /* CONFIG_MIPS_MT_SMTC */
2076
2077 if (!cpu_data[cpu].asid_cache)
2078 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
2079
2080 atomic_inc(&init_mm.mm_count);
2081 current->active_mm = &init_mm;
2082 BUG_ON(current->mm);
2083 enter_lazy_tlb(&init_mm, current);
2084
2085 #ifdef CONFIG_MIPS_MT_SMTC
2086 if (bootTC) {
2087 #endif /* CONFIG_MIPS_MT_SMTC */
2088 /* Boot CPU's cache setup in setup_arch(). */
2089 if (!is_boot_cpu)
2090 cpu_cache_init();
2091 tlb_init();
2092 #ifdef CONFIG_MIPS_MT_SMTC
2093 } else if (!secondaryTC) {
2094 /*
2095 * First TC in non-boot VPE must do subset of tlb_init()
2096 * for MMU control registers.
2097 */
2098 write_c0_pagemask(PM_DEFAULT_MASK);
2099 write_c0_wired(0);
2100 }
2101 #endif /* CONFIG_MIPS_MT_SMTC */
2102 TLBMISS_HANDLER_SETUP();
2103 }
2104
2105 /* Install CPU exception handler */
2106 void set_handler(unsigned long offset, void *addr, unsigned long size)
2107 {
2108 #ifdef CONFIG_CPU_MICROMIPS
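/*
 * microMIPS handler symbols carry the ISA bit (bit 0) set in their
 * address; subtract 1 to get at the actual instruction bytes before
 * copying.
 */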
2109 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2110 #else
2111 memcpy((void *)(ebase + offset), addr, size);
2112 #endif
2113 local_flush_icache_range(ebase + offset, ebase + offset + size);
2114 }
2115
2116 static char panic_null_cerr[] __cpuinitdata =
2117 "Trying to set NULL cache error exception handler";
2118
2119 /*
2120 * Install uncached CPU exception handler.
2121 * This is suitable only for the cache error exception, which is the
2122 * only exception handler that runs uncached.
2123 */
2124 void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
2125 unsigned long size)
2126 {
2127 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
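/*
 * CKSEG1 is the uncached alias of the same physical memory, so the
 * copy below lands where the uncached cache error vector will be
 * fetched from.
 */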
2128
2129 if (!addr)
2130 panic(panic_null_cerr);
2131
2132 memcpy((void *)(uncached_ebase + offset), addr, size);
2133 }
2134
2135 static int __initdata rdhwr_noopt;
2136 static int __init set_rdhwr_noopt(char *str)
2137 {
2138 rdhwr_noopt = 1;
2139 return 1;
2140 }
2141
2142 __setup("rdhwr_noopt", set_rdhwr_noopt);
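/*
 * Without "rdhwr_noopt" the Reserved Instruction vector is pointed at
 * a handler that fast-paths emulation of RDHWR (notably the
 * UserLocal/TLS read) before falling back to the generic do_ri()
 * path; this option forces the plain handler, mainly for debugging
 * (see the vector 10 setup in trap_init() below).
 */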
2143
2144 extern void tlb_do_page_fault_0(void);
2145
2146 void __init trap_init(void)
2147 {
2148 extern char except_vec3_generic;
2149 extern char except_vec4;
2150 extern char except_vec3_r4000;
2151 unsigned long i;
2152
2153 check_wait();
2154
2155 #if defined(CONFIG_KGDB)
2156 if (kgdb_early_setup)
2157 return; /* Already done */
2158 #endif
2159
2160 if (cpu_has_veic || cpu_has_vint) {
2161 unsigned long size = 0x200 + VECTORSPACING*64;
2162 ebase = (unsigned long)
2163 __alloc_bootmem(size, 1 << fls(size), 0);
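/*
 * Room for the 0x200 bytes of base exception vectors plus up to 64
 * vectored-interrupt slots; over-aligning the block to a power of
 * two at least its own size keeps the base suitably aligned for the
 * vector offsets used below.
 */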
2164 } else {
2165 #ifdef CONFIG_KVM_GUEST
2166 #define KVM_GUEST_KSEG0 0x40000000
2167 ebase = KVM_GUEST_KSEG0;
2168 #else
2169 ebase = CKSEG0;
2170 #endif
2171 if (cpu_has_mips_r2 || cpu_has_mips_r6)
2172 ebase += (read_c0_ebase() & 0x3ffff000);
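/*
 * On R2 and later the bootloader may have relocated the exception
 * base, so pick up whatever EBase currently points at rather than
 * assuming the reset default.
 */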
2173 }
2174
2175 if (board_ebase_setup)
2176 board_ebase_setup();
2177 per_cpu_trap_init(true);
2178
2179 /*
2180 * Copy the generic exception handlers to their final destination.
2181 * This will be overridden later as suitable for a particular
2182 * configuration.
2183 */
2184 set_handler(0x180, &except_vec3_generic, 0x80);
2185
2186 /*
2187 * Set up default vectors
2188 */
2189 for (i = 0; i <= 31; i++)
2190 set_except_vector(i, handle_reserved);
2191
2192 /*
2193 * Copy the EJTAG debug exception vector handler code to its final
2194 * destination.
2195 */
2196 if (cpu_has_ejtag && board_ejtag_handler_setup)
2197 board_ejtag_handler_setup();
2198
2199 /*
2200 * Only some CPUs have the watch exceptions.
2201 */
2202 if (cpu_has_watch)
2203 set_except_vector(23, handle_watch);
2204
2205 /*
2206 * Initialise interrupt handlers
2207 */
2208 if (cpu_has_veic || cpu_has_vint) {
2209 int nvec = cpu_has_veic ? 64 : 8;
2210 for (i = 0; i < nvec; i++)
2211 set_vi_handler(i, NULL);
2212 }
2213 else if (cpu_has_divec)
2214 set_handler(0x200, &except_vec4, 0x8);
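/*
 * With Cause.IV set, interrupts use the dedicated vector at offset
 * 0x200 instead of the general exception vector at 0x180; except_vec4
 * is the small stub placed there.
 */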
2215
2216 /*
2217 * Some CPUs can enable/disable cache parity detection, but they do
2218 * it in different ways.
2219 */
2220 parity_protection_init();
2221
2222 /*
2223 * The Data Bus Errors / Instruction Bus Errors are signaled
2224 * by external hardware. Therefore these two exceptions
2225 * may have board specific handlers.
2226 */
2227 if (board_be_init)
2228 board_be_init();
2229
2230 set_except_vector(0, using_rollback_handler() ? rollback_handle_int
2231 : handle_int);
2232 set_except_vector(1, handle_tlbm);
2233 set_except_vector(2, handle_tlbl);
2234 set_except_vector(3, handle_tlbs);
2235
2236 set_except_vector(4, handle_adel);
2237 set_except_vector(5, handle_ades);
2238
2239 set_except_vector(6, handle_ibe);
2240 set_except_vector(7, handle_dbe);
2241
2242 set_except_vector(8, handle_sys);
2243 set_except_vector(9, handle_bp);
2244 set_except_vector(10, rdhwr_noopt ? handle_ri :
2245 (cpu_has_vtag_icache ?
2246 handle_ri_rdhwr_vivt : handle_ri_rdhwr));
2247 set_except_vector(11, handle_cpu);
2248 set_except_vector(12, handle_ov);
2249 set_except_vector(13, handle_tr);
2250 set_except_vector(14, handle_msa_fpe);
2251
2252 if (current_cpu_type() == CPU_R6000 ||
2253 current_cpu_type() == CPU_R6000A) {
2254 /*
2255 * The R6000 is the only R-series CPU that features a machine
2256 * check exception (similar to the R4000 cache error) and
2257 * unaligned ldc1/sdc1 exception. The handlers have not been
2258 * written yet. Well, anyway there is no R6000 machine on the
2259 * current list of targets for Linux/MIPS.
2260 * (Duh, crap, there is someone with a triple R6k machine)
2261 */
2262 //set_except_vector(14, handle_mc);
2263 //set_except_vector(15, handle_ndc);
2264 }
2265
2266
2267 if (board_nmi_handler_setup)
2268 board_nmi_handler_setup();
2269
2270 if (cpu_has_fpu && !cpu_has_nofpuex)
2271 set_except_vector(15, handle_fpe);
2272
2273 set_except_vector(16, handle_ftlb);
2274
2275 if (cpu_has_rixi && cpu_has_rixi_except) {
2276 set_except_vector(19, tlb_do_page_fault_0);
2277 set_except_vector(20, tlb_do_page_fault_0);
2278 }
2279
2280 set_except_vector(21, handle_msa);
2281 set_except_vector(22, handle_mdmx);
2282
2283 if (cpu_has_mcheck)
2284 set_except_vector(24, handle_mcheck);
2285
2286 if (cpu_has_mipsmt)
2287 set_except_vector(25, handle_mt);
2288
2289 set_except_vector(26, handle_dsp);
2290
2291 if (board_cache_error_setup)
2292 board_cache_error_setup();
2293
2294 if (cpu_has_vce)
2295 /* Special exception: R4[04]00 uses also the divec space. */
2296 set_handler(0x180, &except_vec3_r4000, 0x100);
2297 else if (cpu_has_4kex)
2298 set_handler(0x180, &except_vec3_generic, 0x80);
2299 else
2300 set_handler(0x080, &except_vec3_generic, 0x80);
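/*
 * Offset 0x180 is the R4000-style general exception vector; CPUs
 * without the extended exception set (cpu_has_4kex clear, i.e.
 * R3000-class parts) take general exceptions at offset 0x080 instead.
 */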
2301
2302 local_flush_icache_range(ebase, ebase + 0x400);
2303 flush_tlb_handlers();
2304
2305 sort_extable(__start___dbe_table, __stop___dbe_table);
2306
2307 cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
2308 }
2309