1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7 * Copyright (C) 1995, 1996 Paul M. Antoine
8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
13 * Copyright (C) 2014, Imagination Technologies Ltd.
14 */
15 #include <linux/bitops.h>
16 #include <linux/bug.h>
17 #include <linux/compiler.h>
18 #include <linux/context_tracking.h>
19 #include <linux/cpu_pm.h>
20 #include <linux/kexec.h>
21 #include <linux/init.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/mm.h>
25 #include <linux/sched.h>
26 #include <linux/smp.h>
27 #include <linux/spinlock.h>
28 #include <linux/kallsyms.h>
29 #include <linux/bootmem.h>
30 #include <linux/interrupt.h>
31 #include <linux/ptrace.h>
32 #include <linux/kgdb.h>
33 #include <linux/kdebug.h>
34 #include <linux/kprobes.h>
35 #include <linux/notifier.h>
36 #include <linux/kdb.h>
37 #include <linux/irq.h>
38 #include <linux/perf_event.h>
39
40 #include <asm/addrspace.h>
41 #include <asm/bootinfo.h>
42 #include <asm/branch.h>
43 #include <asm/break.h>
44 #include <asm/cop2.h>
45 #include <asm/cpu.h>
46 #include <asm/cpu-type.h>
47 #include <asm/dsp.h>
48 #include <asm/fpu.h>
49 #include <asm/fpu_emulator.h>
50 #include <asm/idle.h>
51 #include <asm/mips-r2-to-r6-emul.h>
52 #include <asm/mipsregs.h>
53 #include <asm/mipsmtregs.h>
54 #include <asm/module.h>
55 #include <asm/msa.h>
56 #include <asm/pgtable.h>
57 #include <asm/ptrace.h>
58 #include <asm/sections.h>
59 #include <asm/siginfo.h>
60 #include <asm/tlbdebug.h>
61 #include <asm/traps.h>
62 #include <asm/uaccess.h>
63 #include <asm/watch.h>
64 #include <asm/mmu_context.h>
65 #include <asm/types.h>
66 #include <asm/stacktrace.h>
67 #include <asm/uasm.h>
68
69 extern void check_wait(void);
70 extern asmlinkage void rollback_handle_int(void);
71 extern asmlinkage void handle_int(void);
72 extern u32 handle_tlbl[];
73 extern u32 handle_tlbs[];
74 extern u32 handle_tlbm[];
75 extern asmlinkage void handle_adel(void);
76 extern asmlinkage void handle_ades(void);
77 extern asmlinkage void handle_ibe(void);
78 extern asmlinkage void handle_dbe(void);
79 extern asmlinkage void handle_sys(void);
80 extern asmlinkage void handle_bp(void);
81 extern asmlinkage void handle_ri(void);
82 extern asmlinkage void handle_ri_rdhwr_vivt(void);
83 extern asmlinkage void handle_ri_rdhwr(void);
84 extern asmlinkage void handle_cpu(void);
85 extern asmlinkage void handle_ov(void);
86 extern asmlinkage void handle_tr(void);
87 extern asmlinkage void handle_msa_fpe(void);
88 extern asmlinkage void handle_fpe(void);
89 extern asmlinkage void handle_ftlb(void);
90 extern asmlinkage void handle_msa(void);
91 extern asmlinkage void handle_mdmx(void);
92 extern asmlinkage void handle_watch(void);
93 extern asmlinkage void handle_mt(void);
94 extern asmlinkage void handle_dsp(void);
95 extern asmlinkage void handle_mcheck(void);
96 extern asmlinkage void handle_reserved(void);
97 extern void tlb_do_page_fault_0(void);
98
99 void (*board_be_init)(void);
100 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
101 void (*board_nmi_handler_setup)(void);
102 void (*board_ejtag_handler_setup)(void);
103 void (*board_bind_eic_interrupt)(int irq, int regset);
104 void (*board_ebase_setup)(void);
105 void(*board_cache_error_setup)(void);
106
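/*
 * Print a raw backtrace: walk the stack words upwards from reg29 and
 * print every value that looks like a kernel text address.
 */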
107 static void show_raw_backtrace(unsigned long reg29)
108 {
109 unsigned long *sp = (unsigned long *)(reg29 & ~3);
110 unsigned long addr;
111
112 printk("Call Trace:");
113 #ifdef CONFIG_KALLSYMS
114 printk("\n");
115 #endif
116 while (!kstack_end(sp)) {
117 unsigned long __user *p =
118 (unsigned long __user *)(unsigned long)sp++;
119 if (__get_user(addr, p)) {
120 printk(" (Bad stack address)");
121 break;
122 }
123 if (__kernel_text_address(addr))
124 print_ip_sym(addr);
125 }
126 printk("\n");
127 }
128
129 #ifdef CONFIG_KALLSYMS
130 int raw_show_trace;
131 static int __init set_raw_show_trace(char *str)
132 {
133 raw_show_trace = 1;
134 return 1;
135 }
136 __setup("raw_show_trace", set_raw_show_trace);
137 #endif
138
139 static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
140 {
141 unsigned long sp = regs->regs[29];
142 unsigned long ra = regs->regs[31];
143 unsigned long pc = regs->cp0_epc;
144
145 if (!task)
146 task = current;
147
148 if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
149 show_raw_backtrace(sp);
150 return;
151 }
152 printk("Call Trace:\n");
153 do {
154 print_ip_sym(pc);
155 pc = unwind_stack(task, &sp, pc, &ra);
156 } while (pc);
157 printk("\n");
158 }
159
160 /*
161 * This routine abuses get_user()/put_user() to reference pointers
162 * with at least a bit of error checking ...
163 */
164 static void show_stacktrace(struct task_struct *task,
165 const struct pt_regs *regs)
166 {
167 const int field = 2 * sizeof(unsigned long);
168 long stackdata;
169 int i;
170 unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
171
172 printk("Stack :");
173 i = 0;
174 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
175 if (i && ((i % (64 / field)) == 0))
176 printk("\n ");
177 if (i > 39) {
178 printk(" ...");
179 break;
180 }
181
182 if (__get_user(stackdata, sp++)) {
183 printk(" (Bad stack address)");
184 break;
185 }
186
187 printk(" %0*lx", field, stackdata);
188 i++;
189 }
190 printk("\n");
191 show_backtrace(task, regs);
192 }
193
194 void show_stack(struct task_struct *task, unsigned long *sp)
195 {
196 struct pt_regs regs;
197 mm_segment_t old_fs = get_fs();
198
199 regs.cp0_status = KSU_KERNEL;
200 if (sp) {
201 regs.regs[29] = (unsigned long)sp;
202 regs.regs[31] = 0;
203 regs.cp0_epc = 0;
204 } else {
205 if (task && task != current) {
206 regs.regs[29] = task->thread.reg29;
207 regs.regs[31] = 0;
208 regs.cp0_epc = task->thread.reg31;
209 #ifdef CONFIG_KGDB_KDB
210 } else if (atomic_read(&kgdb_active) != -1 &&
211 kdb_current_regs) {
212 memcpy(&regs, kdb_current_regs, sizeof(regs));
213 #endif /* CONFIG_KGDB_KDB */
214 } else {
215 prepare_frametrace(&regs);
216 }
217 }
218 /*
219 * show_stack() deals exclusively with kernel mode, so be sure to access
220 * the stack in the kernel (not user) address space.
221 */
222 set_fs(KERNEL_DS);
223 show_stacktrace(task, &regs);
224 set_fs(old_fs);
225 }
226
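/*
 * Dump the instruction words around the faulting PC. If the PC has the
 * ISA bit set (MIPS16e/microMIPS) dump 16-bit units instead; the unit at
 * the PC itself is bracketed with '<' and '>'.
 */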
227 static void show_code(unsigned int __user *pc)
228 {
229 long i;
230 unsigned short __user *pc16 = NULL;
231
232 printk("\nCode:");
233
234 if ((unsigned long)pc & 1)
235 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
236 for(i = -3 ; i < 6 ; i++) {
237 unsigned int insn;
238 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
239 printk(" (Bad address in epc)\n");
240 break;
241 }
242 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
243 }
244 }
245
246 static void __show_regs(const struct pt_regs *regs)
247 {
248 const int field = 2 * sizeof(unsigned long);
249 unsigned int cause = regs->cp0_cause;
250 unsigned int exccode;
251 int i;
252
253 show_regs_print_info(KERN_DEFAULT);
254
255 /*
256 * Saved main processor registers
257 */
258 for (i = 0; i < 32; ) {
259 if ((i % 4) == 0)
260 printk("$%2d :", i);
261 if (i == 0)
262 printk(" %0*lx", field, 0UL);
263 else if (i == 26 || i == 27)
264 printk(" %*s", field, "");
265 else
266 printk(" %0*lx", field, regs->regs[i]);
267
268 i++;
269 if ((i % 4) == 0)
270 printk("\n");
271 }
272
273 #ifdef CONFIG_CPU_HAS_SMARTMIPS
274 printk("Acx : %0*lx\n", field, regs->acx);
275 #endif
276 printk("Hi : %0*lx\n", field, regs->hi);
277 printk("Lo : %0*lx\n", field, regs->lo);
278
279 /*
280 * Saved cp0 registers
281 */
282 printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
283 (void *) regs->cp0_epc);
284 printk("ra : %0*lx %pS\n", field, regs->regs[31],
285 (void *) regs->regs[31]);
286
287 printk("Status: %08x ", (uint32_t) regs->cp0_status);
288
289 if (cpu_has_3kex) {
290 if (regs->cp0_status & ST0_KUO)
291 printk("KUo ");
292 if (regs->cp0_status & ST0_IEO)
293 printk("IEo ");
294 if (regs->cp0_status & ST0_KUP)
295 printk("KUp ");
296 if (regs->cp0_status & ST0_IEP)
297 printk("IEp ");
298 if (regs->cp0_status & ST0_KUC)
299 printk("KUc ");
300 if (regs->cp0_status & ST0_IEC)
301 printk("IEc ");
302 } else if (cpu_has_4kex) {
303 if (regs->cp0_status & ST0_KX)
304 printk("KX ");
305 if (regs->cp0_status & ST0_SX)
306 printk("SX ");
307 if (regs->cp0_status & ST0_UX)
308 printk("UX ");
309 switch (regs->cp0_status & ST0_KSU) {
310 case KSU_USER:
311 printk("USER ");
312 break;
313 case KSU_SUPERVISOR:
314 printk("SUPERVISOR ");
315 break;
316 case KSU_KERNEL:
317 printk("KERNEL ");
318 break;
319 default:
320 printk("BAD_MODE ");
321 break;
322 }
323 if (regs->cp0_status & ST0_ERL)
324 printk("ERL ");
325 if (regs->cp0_status & ST0_EXL)
326 printk("EXL ");
327 if (regs->cp0_status & ST0_IE)
328 printk("IE ");
329 }
330 printk("\n");
331
332 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
333 printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
334
335 if (1 <= exccode && exccode <= 5)
336 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
337
338 printk("PrId : %08x (%s)\n", read_c0_prid(),
339 cpu_name_string());
340 }
341
342 /*
343 * FIXME: really the generic show_regs should take a const pointer argument.
344 */
345 void show_regs(struct pt_regs *regs)
346 {
347 __show_regs((struct pt_regs *)regs);
348 dump_stack();
349 }
350
351 void show_registers(struct pt_regs *regs)
352 {
353 const int field = 2 * sizeof(unsigned long);
354 mm_segment_t old_fs = get_fs();
355
356 __show_regs(regs);
357 print_modules();
358 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
359 current->comm, current->pid, current_thread_info(), current,
360 field, current_thread_info()->tp_value);
361 if (cpu_has_userlocal) {
362 unsigned long tls;
363
364 tls = read_c0_userlocal();
365 if (tls != current_thread_info()->tp_value)
366 printk("*HwTLS: %0*lx\n", field, tls);
367 }
368
369 if (!user_mode(regs))
370 /* Necessary for getting the correct stack content */
371 set_fs(KERNEL_DS);
372 show_stacktrace(current, regs);
373 show_code((unsigned int __user *) regs->cp0_epc);
374 printk("\n");
375 set_fs(old_fs);
376 }
377
378 static DEFINE_RAW_SPINLOCK(die_lock);
379
380 void __noreturn die(const char *str, struct pt_regs *regs)
381 {
382 static int die_counter;
383 int sig = SIGSEGV;
384
385 oops_enter();
386
387 if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
388 SIGSEGV) == NOTIFY_STOP)
389 sig = 0;
390
391 console_verbose();
392 raw_spin_lock_irq(&die_lock);
393 bust_spinlocks(1);
394
395 printk("%s[#%d]:\n", str, ++die_counter);
396 show_registers(regs);
397 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
398 raw_spin_unlock_irq(&die_lock);
399
400 oops_exit();
401
402 if (in_interrupt())
403 panic("Fatal exception in interrupt");
404
405 if (panic_on_oops) {
406 printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
407 ssleep(5);
408 panic("Fatal exception");
409 }
410
411 if (regs && kexec_should_crash(current))
412 crash_kexec(regs);
413
414 do_exit(sig);
415 }
416
417 extern struct exception_table_entry __start___dbe_table[];
418 extern struct exception_table_entry __stop___dbe_table[];
419
420 __asm__(
421 " .section __dbe_table, \"a\"\n"
422 " .previous \n");
423
424 /* Given an address, look for it in the exception tables. */
425 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
426 {
427 const struct exception_table_entry *e;
428
429 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
430 if (!e)
431 e = search_module_dbetables(addr);
432 return e;
433 }
434
435 asmlinkage void do_be(struct pt_regs *regs)
436 {
437 const int field = 2 * sizeof(unsigned long);
438 const struct exception_table_entry *fixup = NULL;
439 int data = regs->cp0_cause & 4;
440 int action = MIPS_BE_FATAL;
441 enum ctx_state prev_state;
442
443 prev_state = exception_enter();
444 /* XXX For now. Fixme, this searches the wrong table ... */
445 if (data && !user_mode(regs))
446 fixup = search_dbe_tables(exception_epc(regs));
447
448 if (fixup)
449 action = MIPS_BE_FIXUP;
450
451 if (board_be_handler)
452 action = board_be_handler(regs, fixup != NULL);
453
454 switch (action) {
455 case MIPS_BE_DISCARD:
456 goto out;
457 case MIPS_BE_FIXUP:
458 if (fixup) {
459 regs->cp0_epc = fixup->nextinsn;
460 goto out;
461 }
462 break;
463 default:
464 break;
465 }
466
467 /*
468 * Assume it would be too dangerous to continue ...
469 */
470 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
471 data ? "Data" : "Instruction",
472 field, regs->cp0_epc, field, regs->regs[31]);
473 if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
474 SIGBUS) == NOTIFY_STOP)
475 goto out;
476
477 die_if_kernel("Oops", regs);
478 force_sig(SIGBUS, current);
479
480 out:
481 exception_exit(prev_state);
482 }
483
484 /*
485 * ll/sc, rdhwr, sync emulation
486 */
487
488 #define OPCODE 0xfc000000
489 #define BASE 0x03e00000
490 #define RT 0x001f0000
491 #define OFFSET 0x0000ffff
492 #define LL 0xc0000000
493 #define SC 0xe0000000
494 #define SPEC0 0x00000000
495 #define SPEC3 0x7c000000
496 #define RD 0x0000f800
497 #define FUNC 0x0000003f
498 #define SYNC 0x0000000f
499 #define RDHWR 0x0000003b
500
501 /* microMIPS definitions */
502 #define MM_POOL32A_FUNC 0xfc00ffff
503 #define MM_RDHWR 0x00006b3c
504 #define MM_RS 0x001f0000
505 #define MM_RT 0x03e00000
506
507 /*
508 * The ll_bit is cleared by r*_switch.S
509 */
510
511 unsigned int ll_bit;
512 struct task_struct *ll_task;
513
514 static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
515 {
516 unsigned long value, __user *vaddr;
517 long offset;
518
519 /*
520 * Analyse the ll instruction that just caused an RI exception
521 * and compute the address it references.
522 */
523
524 /* sign extend offset */
525 offset = opcode & OFFSET;
526 offset <<= 16;
527 offset >>= 16;
528
529 vaddr = (unsigned long __user *)
530 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
531
532 if ((unsigned long)vaddr & 3)
533 return SIGBUS;
534 if (get_user(value, vaddr))
535 return SIGSEGV;
536
537 preempt_disable();
538
539 if (ll_task == NULL || ll_task == current) {
540 ll_bit = 1;
541 } else {
542 ll_bit = 0;
543 }
544 ll_task = current;
545
546 preempt_enable();
547
548 regs->regs[(opcode & RT) >> 16] = value;
549
550 return 0;
551 }
552
553 static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
554 {
555 unsigned long __user *vaddr;
556 unsigned long reg;
557 long offset;
558
559 /*
560 * Analyse the sc instruction that just caused an RI exception
561 * and compute the address it references.
562 */
563
564 /* sign extend offset */
565 offset = opcode & OFFSET;
566 offset <<= 16;
567 offset >>= 16;
568
569 vaddr = (unsigned long __user *)
570 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
571 reg = (opcode & RT) >> 16;
572
573 if ((unsigned long)vaddr & 3)
574 return SIGBUS;
575
576 preempt_disable();
577
578 if (ll_bit == 0 || ll_task != current) {
579 regs->regs[reg] = 0;
580 preempt_enable();
581 return 0;
582 }
583
584 preempt_enable();
585
586 if (put_user(regs->regs[reg], vaddr))
587 return SIGSEGV;
588
589 regs->regs[reg] = 1;
590
591 return 0;
592 }
593
594 /*
595 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
596 * opcodes are supposed to result in coprocessor unusable exceptions if
597 * executed on ll/sc-less processors. That's the theory. In practice a
598 * few processors such as NEC's VR4100 throw reserved instruction exceptions
599 * instead, so we're doing the emulation thing in both exception handlers.
600 */
601 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
602 {
603 if ((opcode & OPCODE) == LL) {
604 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
605 1, regs, 0);
606 return simulate_ll(regs, opcode);
607 }
608 if ((opcode & OPCODE) == SC) {
609 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
610 1, regs, 0);
611 return simulate_sc(regs, opcode);
612 }
613
614 return -1; /* Must be something else ... */
615 }
616
617 /*
618 * Simulate trapping 'rdhwr' instructions to provide user accessible
619 * registers not implemented in hardware.
620 */
621 static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
622 {
623 struct thread_info *ti = task_thread_info(current);
624
625 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
626 1, regs, 0);
627 switch (rd) {
628 case 0: /* CPU number */
629 regs->regs[rt] = smp_processor_id();
630 return 0;
631 case 1: /* SYNCI length */
632 regs->regs[rt] = min(current_cpu_data.dcache.linesz,
633 current_cpu_data.icache.linesz);
634 return 0;
635 case 2: /* Read count register */
636 regs->regs[rt] = read_c0_count();
637 return 0;
638 case 3: /* Count register resolution */
639 switch (current_cpu_type()) {
640 case CPU_20KC:
641 case CPU_25KF:
642 regs->regs[rt] = 1;
643 break;
644 default:
645 regs->regs[rt] = 2;
646 }
647 return 0;
648 case 29:
649 regs->regs[rt] = ti->tp_value;
650 return 0;
651 default:
652 return -1;
653 }
654 }
655
656 static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
657 {
658 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
659 int rd = (opcode & RD) >> 11;
660 int rt = (opcode & RT) >> 16;
661
662 simulate_rdhwr(regs, rd, rt);
663 return 0;
664 }
665
666 /* Not ours. */
667 return -1;
668 }
669
670 static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
671 {
672 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
673 int rd = (opcode & MM_RS) >> 16;
674 int rt = (opcode & MM_RT) >> 21;
675 simulate_rdhwr(regs, rd, rt);
676 return 0;
677 }
678
679 /* Not ours. */
680 return -1;
681 }
682
683 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
684 {
685 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
686 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
687 1, regs, 0);
688 return 0;
689 }
690
691 return -1; /* Must be something else ... */
692 }
693
694 asmlinkage void do_ov(struct pt_regs *regs)
695 {
696 enum ctx_state prev_state;
697 siginfo_t info = {
698 .si_signo = SIGFPE,
699 .si_code = FPE_INTOVF,
700 .si_addr = (void __user *)regs->cp0_epc,
701 };
702
703 prev_state = exception_enter();
704 die_if_kernel("Integer overflow", regs);
705
706 force_sig_info(SIGFPE, &info, current);
707 exception_exit(prev_state);
708 }
709
710 /*
711 * Send SIGFPE according to FCSR Cause bits, which must have already
712 * been masked against Enable bits. This is important as Inexact can
713 * happen together with Overflow or Underflow, and `ptrace' can set
714 * any bits.
715 */
716 void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
717 struct task_struct *tsk)
718 {
719 struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
720
721 if (fcr31 & FPU_CSR_INV_X)
722 si.si_code = FPE_FLTINV;
723 else if (fcr31 & FPU_CSR_DIV_X)
724 si.si_code = FPE_FLTDIV;
725 else if (fcr31 & FPU_CSR_OVF_X)
726 si.si_code = FPE_FLTOVF;
727 else if (fcr31 & FPU_CSR_UDF_X)
728 si.si_code = FPE_FLTUND;
729 else if (fcr31 & FPU_CSR_INE_X)
730 si.si_code = FPE_FLTRES;
731 else
732 si.si_code = __SI_FAULT;
733 force_sig_info(SIGFPE, &si, tsk);
734 }
735
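/*
 * Deliver the signal chosen by the FPU emulator, mapping SIGFPE onto the
 * FCSR Cause bits and filling in a sensible si_code for SIGBUS/SIGSEGV.
 * Returns 1 if a signal was sent, 0 if sig was 0.
 */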
736 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
737 {
738 struct siginfo si = { 0 };
739
740 switch (sig) {
741 case 0:
742 return 0;
743
744 case SIGFPE:
745 force_fcr31_sig(fcr31, fault_addr, current);
746 return 1;
747
748 case SIGBUS:
749 si.si_addr = fault_addr;
750 si.si_signo = sig;
751 si.si_code = BUS_ADRERR;
752 force_sig_info(sig, &si, current);
753 return 1;
754
755 case SIGSEGV:
756 si.si_addr = fault_addr;
757 si.si_signo = sig;
758 down_read(&current->mm->mmap_sem);
759 if (find_vma(current->mm, (unsigned long)fault_addr))
760 si.si_code = SEGV_ACCERR;
761 else
762 si.si_code = SEGV_MAPERR;
763 up_read(&current->mm->mmap_sem);
764 force_sig_info(sig, &si, current);
765 return 1;
766
767 default:
768 force_sig(sig, current);
769 return 1;
770 }
771 }
772
773 static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
774 unsigned long old_epc, unsigned long old_ra)
775 {
776 union mips_instruction inst = { .word = opcode };
777 void __user *fault_addr;
778 unsigned long fcr31;
779 int sig;
780
781 /* If it's obviously not an FP instruction, skip it */
782 switch (inst.i_format.opcode) {
783 case cop1_op:
784 case cop1x_op:
785 case lwc1_op:
786 case ldc1_op:
787 case swc1_op:
788 case sdc1_op:
789 break;
790
791 default:
792 return -1;
793 }
794
795 /*
796 * do_ri skipped over the instruction via compute_return_epc, undo
797 * that for the FPU emulator.
798 */
799 regs->cp0_epc = old_epc;
800 regs->regs[31] = old_ra;
801
802 /* Save the FP context to struct thread_struct */
803 lose_fpu(1);
804
805 /* Run the emulator */
806 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
807 &fault_addr);
808
809 /*
810 * We can't allow the emulated instruction to leave any
811 * enabled Cause bits set in $fcr31.
812 */
813 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
814 current->thread.fpu.fcr31 &= ~fcr31;
815
816 /* Restore the hardware register state */
817 own_fpu(1);
818
819 /* Send a signal if required. */
820 process_fpemu_return(sig, fault_addr, fcr31);
821
822 return 0;
823 }
824
825 /*
826 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
827 */
828 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
829 {
830 enum ctx_state prev_state;
831 void __user *fault_addr;
832 int sig;
833
834 prev_state = exception_enter();
835 if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
836 SIGFPE) == NOTIFY_STOP)
837 goto out;
838
839 /* Clear FCSR.Cause before enabling interrupts */
840 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
841 local_irq_enable();
842
843 die_if_kernel("FP exception in kernel code", regs);
844
845 if (fcr31 & FPU_CSR_UNI_X) {
846 /*
847 * Unimplemented operation exception. If we've got the full
848 * software emulator on-board, let's use it...
849 *
850 * Force FPU to dump state into task/thread context. We're
851 * moving a lot of data here for what is probably a single
852 * instruction, but the alternative is to pre-decode the FP
853 * register operands before invoking the emulator, which seems
854 * a bit extreme for what should be an infrequent event.
855 */
856 /* Ensure 'resume' does not overwrite the saved FP context. */
857 lose_fpu(1);
858
859 /* Run the emulator */
860 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
861 &fault_addr);
862
863 /*
864 * We can't allow the emulated instruction to leave any
865 * enabled Cause bits set in $fcr31.
866 */
867 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
868 current->thread.fpu.fcr31 &= ~fcr31;
869
870 /* Restore the hardware register state */
871 own_fpu(1); /* Using the FPU again. */
872 } else {
873 sig = SIGFPE;
874 fault_addr = (void __user *) regs->cp0_epc;
875 }
876
877 /* Send a signal if required. */
878 process_fpemu_return(sig, fault_addr, fcr31);
879
880 out:
881 exception_exit(prev_state);
882 }
883
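/*
 * Common handling for trap and break exceptions: give kgdb and the die
 * notifier chain a chance to claim the exception, then turn arithmetic
 * break codes into SIGFPE and most others into SIGTRAP.
 */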
884 void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
885 const char *str)
886 {
887 siginfo_t info = { 0 };
888 char b[40];
889
890 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
891 if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
892 SIGTRAP) == NOTIFY_STOP)
893 return;
894 #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
895
896 if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
897 SIGTRAP) == NOTIFY_STOP)
898 return;
899
900 /*
901 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
902 * insns, even for trap and break codes that indicate arithmetic
903 * failures. Weird ...
904 * But should we continue the brokenness??? --macro
905 */
906 switch (code) {
907 case BRK_OVERFLOW:
908 case BRK_DIVZERO:
909 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
910 die_if_kernel(b, regs);
911 if (code == BRK_DIVZERO)
912 info.si_code = FPE_INTDIV;
913 else
914 info.si_code = FPE_INTOVF;
915 info.si_signo = SIGFPE;
916 info.si_addr = (void __user *) regs->cp0_epc;
917 force_sig_info(SIGFPE, &info, current);
918 break;
919 case BRK_BUG:
920 die_if_kernel("Kernel bug detected", regs);
921 force_sig(SIGTRAP, current);
922 break;
923 case BRK_MEMU:
924 /*
925 * This breakpoint code is used by the FPU emulator to retake
926 * control of the CPU after executing the instruction from the
927 * delay slot of an emulated branch.
928 *
929 * Terminate if the exception was recognized as a delay slot return;
930 * otherwise handle it as normal.
931 */
932 if (do_dsemulret(regs))
933 return;
934
935 die_if_kernel("Math emu break/trap", regs);
936 force_sig(SIGTRAP, current);
937 break;
938 default:
939 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
940 die_if_kernel(b, regs);
941 if (si_code) {
942 info.si_signo = SIGTRAP;
943 info.si_code = si_code;
944 force_sig_info(SIGTRAP, &info, current);
945 } else {
946 force_sig(SIGTRAP, current);
947 }
948 }
949 }
950
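/*
 * Break exception handler: fetch the break instruction at EPC (handling
 * the MIPS16e and microMIPS encodings), extract the break code, let
 * kprobes/uprobes claim their codes and pass the rest to do_trap_or_bp().
 */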
951 asmlinkage void do_bp(struct pt_regs *regs)
952 {
953 unsigned long epc = msk_isa16_mode(exception_epc(regs));
954 unsigned int opcode, bcode;
955 enum ctx_state prev_state;
956 mm_segment_t seg;
957
958 seg = get_fs();
959 if (!user_mode(regs))
960 set_fs(KERNEL_DS);
961
962 prev_state = exception_enter();
963 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
964 if (get_isa16_mode(regs->cp0_epc)) {
965 u16 instr[2];
966
967 if (__get_user(instr[0], (u16 __user *)epc))
968 goto out_sigsegv;
969
970 if (!cpu_has_mmips) {
971 /* MIPS16e mode */
972 bcode = (instr[0] >> 5) & 0x3f;
973 } else if (mm_insn_16bit(instr[0])) {
974 /* 16-bit microMIPS BREAK */
975 bcode = instr[0] & 0xf;
976 } else {
977 /* 32-bit microMIPS BREAK */
978 if (__get_user(instr[1], (u16 __user *)(epc + 2)))
979 goto out_sigsegv;
980 opcode = (instr[0] << 16) | instr[1];
981 bcode = (opcode >> 6) & ((1 << 20) - 1);
982 }
983 } else {
984 if (__get_user(opcode, (unsigned int __user *)epc))
985 goto out_sigsegv;
986 bcode = (opcode >> 6) & ((1 << 20) - 1);
987 }
988
989 /*
990 * There is an ancient bug in MIPS assemblers that causes the break
991 * code to start at bit 16 instead of bit 6 in the opcode.
992 * Gas is bug-compatible, but not always, grrr...
993 * We handle both cases with a simple heuristic. --macro
994 */
995 if (bcode >= (1 << 10))
996 bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
997
998 /*
999 * Notify the kprobe handlers if the instruction is likely to
1000 * pertain to them.
1001 */
1002 switch (bcode) {
1003 case BRK_UPROBE:
1004 if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1005 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1006 goto out;
1007 else
1008 break;
1009 case BRK_UPROBE_XOL:
1010 if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1011 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1012 goto out;
1013 else
1014 break;
1015 case BRK_KPROBE_BP:
1016 if (notify_die(DIE_BREAK, "debug", regs, bcode,
1017 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1018 goto out;
1019 else
1020 break;
1021 case BRK_KPROBE_SSTEPBP:
1022 if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1023 current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1024 goto out;
1025 else
1026 break;
1027 default:
1028 break;
1029 }
1030
1031 do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1032
1033 out:
1034 set_fs(seg);
1035 exception_exit(prev_state);
1036 return;
1037
1038 out_sigsegv:
1039 force_sig(SIGSEGV, current);
1040 goto out;
1041 }
1042
1043 asmlinkage void do_tr(struct pt_regs *regs)
1044 {
1045 u32 opcode, tcode = 0;
1046 enum ctx_state prev_state;
1047 u16 instr[2];
1048 mm_segment_t seg;
1049 unsigned long epc = msk_isa16_mode(exception_epc(regs));
1050
1051 seg = get_fs();
1052 if (!user_mode(regs))
1053 set_fs(get_ds());
1054
1055 prev_state = exception_enter();
1056 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1057 if (get_isa16_mode(regs->cp0_epc)) {
1058 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1059 __get_user(instr[1], (u16 __user *)(epc + 2)))
1060 goto out_sigsegv;
1061 opcode = (instr[0] << 16) | instr[1];
1062 /* Immediate versions don't provide a code. */
1063 if (!(opcode & OPCODE))
1064 tcode = (opcode >> 12) & ((1 << 4) - 1);
1065 } else {
1066 if (__get_user(opcode, (u32 __user *)epc))
1067 goto out_sigsegv;
1068 /* Immediate versions don't provide a code. */
1069 if (!(opcode & OPCODE))
1070 tcode = (opcode >> 6) & ((1 << 10) - 1);
1071 }
1072
1073 do_trap_or_bp(regs, tcode, 0, "Trap");
1074
1075 out:
1076 set_fs(seg);
1077 exception_exit(prev_state);
1078 return;
1079
1080 out_sigsegv:
1081 force_sig(SIGSEGV, current);
1082 goto out;
1083 }
1084
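/*
 * Reserved Instruction exception handler. If MIPS R2 emulation is enabled
 * on an R6 CPU, user-mode code is first passed to the R2-to-R6 decoder;
 * otherwise try the ll/sc, rdhwr, sync and FP emulations before
 * delivering SIGILL.
 */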
1085 asmlinkage void do_ri(struct pt_regs *regs)
1086 {
1087 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1088 unsigned long old_epc = regs->cp0_epc;
1089 unsigned long old31 = regs->regs[31];
1090 enum ctx_state prev_state;
1091 unsigned int opcode = 0;
1092 int status = -1;
1093
1094 /*
1095 * Avoid any kernel code. Just emulate the R2 instruction
1096 * as quickly as possible.
1097 */
1098 if (mipsr2_emulation && cpu_has_mips_r6 &&
1099 likely(user_mode(regs)) &&
1100 likely(get_user(opcode, epc) >= 0)) {
1101 unsigned long fcr31 = 0;
1102
1103 status = mipsr2_decoder(regs, opcode, &fcr31);
1104 switch (status) {
1105 case 0:
1106 case SIGEMT:
1107 task_thread_info(current)->r2_emul_return = 1;
1108 return;
1109 case SIGILL:
1110 goto no_r2_instr;
1111 default:
1112 process_fpemu_return(status,
1113 &current->thread.cp0_baduaddr,
1114 fcr31);
1115 task_thread_info(current)->r2_emul_return = 1;
1116 return;
1117 }
1118 }
1119
1120 no_r2_instr:
1121
1122 prev_state = exception_enter();
1123 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1124
1125 if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1126 SIGILL) == NOTIFY_STOP)
1127 goto out;
1128
1129 die_if_kernel("Reserved instruction in kernel code", regs);
1130
1131 if (unlikely(compute_return_epc(regs) < 0))
1132 goto out;
1133
1134 if (get_isa16_mode(regs->cp0_epc)) {
1135 unsigned short mmop[2] = { 0 };
1136
1137 if (unlikely(get_user(mmop[0], epc) < 0))
1138 status = SIGSEGV;
1139 if (unlikely(get_user(mmop[1], epc) < 0))
1140 status = SIGSEGV;
1141 opcode = (mmop[0] << 16) | mmop[1];
1142
1143 if (status < 0)
1144 status = simulate_rdhwr_mm(regs, opcode);
1145 } else {
1146 if (unlikely(get_user(opcode, epc) < 0))
1147 status = SIGSEGV;
1148
1149 if (!cpu_has_llsc && status < 0)
1150 status = simulate_llsc(regs, opcode);
1151
1152 if (status < 0)
1153 status = simulate_rdhwr_normal(regs, opcode);
1154
1155 if (status < 0)
1156 status = simulate_sync(regs, opcode);
1157
1158 if (status < 0)
1159 status = simulate_fp(regs, opcode, old_epc, old31);
1160 }
1161
1162 if (status < 0)
1163 status = SIGILL;
1164
1165 if (unlikely(status > 0)) {
1166 regs->cp0_epc = old_epc; /* Undo skip-over. */
1167 regs->regs[31] = old31;
1168 force_sig(status, current);
1169 }
1170
1171 out:
1172 exception_exit(prev_state);
1173 }
1174
1175 /*
1176 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1177 * emulated more than some threshold number of instructions, force migration to
1178 * a "CPU" that has FP support.
1179 */
1180 static void mt_ase_fp_affinity(void)
1181 {
1182 #ifdef CONFIG_MIPS_MT_FPAFF
1183 if (mt_fpemul_threshold > 0 &&
1184 ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1185 /*
1186 * If there's no FPU present, or if the application has already
1187 * restricted the allowed set to exclude any CPUs with FPUs,
1188 * we'll skip the procedure.
1189 */
1190 if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
1191 cpumask_t tmask;
1192
1193 current->thread.user_cpus_allowed
1194 = current->cpus_allowed;
1195 cpumask_and(&tmask, &current->cpus_allowed,
1196 &mt_fpu_cpumask);
1197 set_cpus_allowed_ptr(current, &tmask);
1198 set_thread_flag(TIF_FPUBOUND);
1199 }
1200 }
1201 #endif /* CONFIG_MIPS_MT_FPAFF */
1202 }
1203
1204 /*
1205 * No lock; only written during early bootup by CPU 0.
1206 */
1207 static RAW_NOTIFIER_HEAD(cu2_chain);
1208
1209 int __ref register_cu2_notifier(struct notifier_block *nb)
1210 {
1211 return raw_notifier_chain_register(&cu2_chain, nb);
1212 }
1213
1214 int cu2_notifier_call_chain(unsigned long val, void *v)
1215 {
1216 return raw_notifier_call_chain(&cu2_chain, val, v);
1217 }
1218
1219 static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1220 void *data)
1221 {
1222 struct pt_regs *regs = data;
1223
1224 die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1225 "instruction", regs);
1226 force_sig(SIGILL, current);
1227
1228 return NOTIFY_OK;
1229 }
1230
1231 static int wait_on_fp_mode_switch(atomic_t *p)
1232 {
1233 /*
1234 * The FP mode for this task is currently being switched. That may
1235 * involve modifications to the format of this task's FP context which
1236 * make it unsafe to proceed with execution for the moment. Instead,
1237 * schedule some other task.
1238 */
1239 schedule();
1240 return 0;
1241 }
1242
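/*
 * Enable the FPU (and MSA when requested) for the current task and
 * initialise or restore its saved context, taking care not to clobber
 * live scalar FP state or expose stale vector register contents.
 * Returns 0 on success or an error from the FPU ownership routines.
 */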
1243 static int enable_restore_fp_context(int msa)
1244 {
1245 int err, was_fpu_owner, prior_msa;
1246
1247 /*
1248 * If an FP mode switch is currently underway, wait for it to
1249 * complete before proceeding.
1250 */
1251 wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1252 wait_on_fp_mode_switch, TASK_KILLABLE);
1253
1254 if (!used_math()) {
1255 /* First time FP context user. */
1256 preempt_disable();
1257 err = init_fpu();
1258 if (msa && !err) {
1259 enable_msa();
1260 init_msa_upper();
1261 set_thread_flag(TIF_USEDMSA);
1262 set_thread_flag(TIF_MSA_CTX_LIVE);
1263 }
1264 preempt_enable();
1265 if (!err)
1266 set_used_math();
1267 return err;
1268 }
1269
1270 /*
1271 * This task has formerly used the FP context.
1272 *
1273 * If this thread has no live MSA vector context then we can simply
1274 * restore the scalar FP context. If it has live MSA vector context
1275 * (that is, it has or may have used MSA since last performing a
1276 * function call) then we'll need to restore the vector context. This
1277 * applies even if we're currently only executing a scalar FP
1278 * instruction. This is because if we were to later execute an MSA
1279 * instruction then we'd either have to:
1280 *
1281 * - Restore the vector context & clobber any registers modified by
1282 * scalar FP instructions between now & then.
1283 *
1284 * or
1285 *
1286 * - Not restore the vector context & lose the most significant bits
1287 * of all vector registers.
1288 *
1289 * Neither of those options is acceptable. We cannot restore the least
1290 * significant bits of the registers now & only restore the most
1291 * significant bits later because the most significant bits of any
1292 * vector registers whose aliased FP register is modified now will have
1293 * been zeroed. We'd have no way to know that when restoring the vector
1294 * context & thus may load an outdated value for the most significant
1295 * bits of a vector register.
1296 */
1297 if (!msa && !thread_msa_context_live())
1298 return own_fpu(1);
1299
1300 /*
1301 * This task is using or has previously used MSA. Thus we require
1302 * that Status.FR == 1.
1303 */
1304 preempt_disable();
1305 was_fpu_owner = is_fpu_owner();
1306 err = own_fpu_inatomic(0);
1307 if (err)
1308 goto out;
1309
1310 enable_msa();
1311 write_msa_csr(current->thread.fpu.msacsr);
1312 set_thread_flag(TIF_USEDMSA);
1313
1314 /*
1315 * If this is the first time that the task is using MSA and it has
1316 * previously used scalar FP in this time slice then we already have
1317 * FP context which we shouldn't clobber. We do however need to clear
1318 * the upper 64b of each vector register so that this task has no
1319 * opportunity to see data left behind by another.
1320 */
1321 prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1322 if (!prior_msa && was_fpu_owner) {
1323 init_msa_upper();
1324
1325 goto out;
1326 }
1327
1328 if (!prior_msa) {
1329 /*
1330 * Restore the least significant 64b of each vector register
1331 * from the existing scalar FP context.
1332 */
1333 _restore_fp(current);
1334
1335 /*
1336 * The task has not formerly used MSA, so clear the upper 64b
1337 * of each vector register such that it cannot see data left
1338 * behind by another task.
1339 */
1340 init_msa_upper();
1341 } else {
1342 /* We need to restore the vector context. */
1343 restore_msa(current);
1344
1345 /* Restore the scalar FP control & status register */
1346 if (!was_fpu_owner)
1347 write_32bit_cp1_register(CP1_STATUS,
1348 current->thread.fpu.fcr31);
1349 }
1350
1351 out:
1352 preempt_enable();
1353
1354 return 0;
1355 }
1356
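/*
 * Coprocessor Unusable exception handler. CU0 faults are used to emulate
 * rdhwr/ll/sc in user mode, CU1 (and erroneous CU3) faults enable or
 * emulate the FPU, and CU2 faults are dispatched via the cu2 notifier
 * chain.
 */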
1357 asmlinkage void do_cpu(struct pt_regs *regs)
1358 {
1359 enum ctx_state prev_state;
1360 unsigned int __user *epc;
1361 unsigned long old_epc, old31;
1362 void __user *fault_addr;
1363 unsigned int opcode;
1364 unsigned long fcr31;
1365 unsigned int cpid;
1366 int status, err;
1367 unsigned long __maybe_unused flags;
1368 int sig;
1369
1370 prev_state = exception_enter();
1371 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1372
1373 if (cpid != 2)
1374 die_if_kernel("do_cpu invoked from kernel context!", regs);
1375
1376 switch (cpid) {
1377 case 0:
1378 epc = (unsigned int __user *)exception_epc(regs);
1379 old_epc = regs->cp0_epc;
1380 old31 = regs->regs[31];
1381 opcode = 0;
1382 status = -1;
1383
1384 if (unlikely(compute_return_epc(regs) < 0))
1385 break;
1386
1387 if (get_isa16_mode(regs->cp0_epc)) {
1388 unsigned short mmop[2] = { 0 };
1389
1390 if (unlikely(get_user(mmop[0], epc) < 0))
1391 status = SIGSEGV;
1392 if (unlikely(get_user(mmop[1], epc) < 0))
1393 status = SIGSEGV;
1394 opcode = (mmop[0] << 16) | mmop[1];
1395
1396 if (status < 0)
1397 status = simulate_rdhwr_mm(regs, opcode);
1398 } else {
1399 if (unlikely(get_user(opcode, epc) < 0))
1400 status = SIGSEGV;
1401
1402 if (!cpu_has_llsc && status < 0)
1403 status = simulate_llsc(regs, opcode);
1404
1405 if (status < 0)
1406 status = simulate_rdhwr_normal(regs, opcode);
1407 }
1408
1409 if (status < 0)
1410 status = SIGILL;
1411
1412 if (unlikely(status > 0)) {
1413 regs->cp0_epc = old_epc; /* Undo skip-over. */
1414 regs->regs[31] = old31;
1415 force_sig(status, current);
1416 }
1417
1418 break;
1419
1420 case 3:
1421 /*
1422 * The COP3 opcode space and consequently the CP0.Status.CU3
1423 * bit and the CP0.Cause.CE=3 encoding have been removed as
1424 * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
1425 * up the space has been reused for COP1X instructions, that
1426 * are enabled by the CP0.Status.CU1 bit and consequently
1427 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1428 * exceptions. Some FPU-less processors that implement one
1429 * of these ISAs however use this code erroneously for COP1X
1430 * instructions. Therefore we redirect this trap to the FP
1431 * emulator too.
1432 */
1433 if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1434 force_sig(SIGILL, current);
1435 break;
1436 }
1437 /* Fall through. */
1438
1439 case 1:
1440 err = enable_restore_fp_context(0);
1441
1442 if (raw_cpu_has_fpu && !err)
1443 break;
1444
1445 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1446 &fault_addr);
1447
1448 /*
1449 * We can't allow the emulated instruction to leave
1450 * any enabled Cause bits set in $fcr31.
1451 */
1452 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1453 current->thread.fpu.fcr31 &= ~fcr31;
1454
1455 /* Send a signal if required. */
1456 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1457 mt_ase_fp_affinity();
1458
1459 break;
1460
1461 case 2:
1462 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1463 break;
1464 }
1465
1466 exception_exit(prev_state);
1467 }
1468
1469 asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1470 {
1471 enum ctx_state prev_state;
1472
1473 prev_state = exception_enter();
1474 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1475 if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1476 current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1477 goto out;
1478
1479 /* Clear MSACSR.Cause before enabling interrupts */
1480 write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1481 local_irq_enable();
1482
1483 die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1484 force_sig(SIGFPE, current);
1485 out:
1486 exception_exit(prev_state);
1487 }
1488
1489 asmlinkage void do_msa(struct pt_regs *regs)
1490 {
1491 enum ctx_state prev_state;
1492 int err;
1493
1494 prev_state = exception_enter();
1495
1496 if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1497 force_sig(SIGILL, current);
1498 goto out;
1499 }
1500
1501 die_if_kernel("do_msa invoked from kernel context!", regs);
1502
1503 err = enable_restore_fp_context(1);
1504 if (err)
1505 force_sig(SIGILL, current);
1506 out:
1507 exception_exit(prev_state);
1508 }
1509
1510 asmlinkage void do_mdmx(struct pt_regs *regs)
1511 {
1512 enum ctx_state prev_state;
1513
1514 prev_state = exception_enter();
1515 force_sig(SIGILL, current);
1516 exception_exit(prev_state);
1517 }
1518
1519 /*
1520 * Called with interrupts disabled.
1521 */
1522 asmlinkage void do_watch(struct pt_regs *regs)
1523 {
1524 siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
1525 enum ctx_state prev_state;
1526 u32 cause;
1527
1528 prev_state = exception_enter();
1529 /*
1530 * Clear WP (bit 22) bit of cause register so we don't loop
1531 * forever.
1532 */
1533 cause = read_c0_cause();
1534 cause &= ~(1 << 22);
1535 write_c0_cause(cause);
1536
1537 /*
1538 * If the current thread has the watch registers loaded, save
1539 * their values and send SIGTRAP. Otherwise another thread
1540 * left the registers set, clear them and continue.
1541 */
1542 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1543 mips_read_watch_registers();
1544 local_irq_enable();
1545 force_sig_info(SIGTRAP, &info, current);
1546 } else {
1547 mips_clear_watch_registers();
1548 local_irq_enable();
1549 }
1550 exception_exit(prev_state);
1551 }
1552
1553 asmlinkage void do_mcheck(struct pt_regs *regs)
1554 {
1555 int multi_match = regs->cp0_status & ST0_TS;
1556 enum ctx_state prev_state;
1557 mm_segment_t old_fs = get_fs();
1558
1559 prev_state = exception_enter();
1560 show_regs(regs);
1561
1562 if (multi_match) {
1563 dump_tlb_regs();
1564 pr_info("\n");
1565 dump_tlb_all();
1566 }
1567
1568 if (!user_mode(regs))
1569 set_fs(KERNEL_DS);
1570
1571 show_code((unsigned int __user *) regs->cp0_epc);
1572
1573 set_fs(old_fs);
1574
1575 /*
1576 * Some chips may have other causes of machine check (e.g. SB1
1577 * graduation timer)
1578 */
1579 panic("Caught Machine Check exception - %scaused by multiple "
1580 "matching entries in the TLB.",
1581 (multi_match) ? "" : "not ");
1582 }
1583
1584 asmlinkage void do_mt(struct pt_regs *regs)
1585 {
1586 int subcode;
1587
1588 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1589 >> VPECONTROL_EXCPT_SHIFT;
1590 switch (subcode) {
1591 case 0:
1592 printk(KERN_DEBUG "Thread Underflow\n");
1593 break;
1594 case 1:
1595 printk(KERN_DEBUG "Thread Overflow\n");
1596 break;
1597 case 2:
1598 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1599 break;
1600 case 3:
1601 printk(KERN_DEBUG "Gating Storage Exception\n");
1602 break;
1603 case 4:
1604 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1605 break;
1606 case 5:
1607 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1608 break;
1609 default:
1610 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1611 subcode);
1612 break;
1613 }
1614 die_if_kernel("MIPS MT Thread exception in kernel", regs);
1615
1616 force_sig(SIGILL, current);
1617 }
1618
1619
1620 asmlinkage void do_dsp(struct pt_regs *regs)
1621 {
1622 if (cpu_has_dsp)
1623 panic("Unexpected DSP exception");
1624
1625 force_sig(SIGILL, current);
1626 }
1627
1628 asmlinkage void do_reserved(struct pt_regs *regs)
1629 {
1630 /*
1631 * Game over - no way to handle this if it ever occurs. Most probably
1632 * caused by a new unknown cpu type or after another deadly
1633 * hard/software error.
1634 */
1635 show_regs(regs);
1636 panic("Caught reserved exception %ld - should not happen.",
1637 (regs->cp0_cause & 0x7f) >> 2);
1638 }
1639
1640 static int __initdata l1parity = 1;
1641 static int __init nol1parity(char *s)
1642 {
1643 l1parity = 0;
1644 return 1;
1645 }
1646 __setup("nol1par", nol1parity);
1647 static int __initdata l2parity = 1;
1648 static int __init nol2parity(char *s)
1649 {
1650 l2parity = 0;
1651 return 1;
1652 }
1653 __setup("nol2par", nol2parity);
1654
1655 /*
1656 * Some MIPS CPUs can enable/disable cache parity detection, but they
1657 * do it in different ways.
1658 */
1659 static inline void parity_protection_init(void)
1660 {
1661 switch (current_cpu_type()) {
1662 case CPU_24K:
1663 case CPU_34K:
1664 case CPU_74K:
1665 case CPU_1004K:
1666 case CPU_1074K:
1667 case CPU_INTERAPTIV:
1668 case CPU_PROAPTIV:
1669 case CPU_P5600:
1670 case CPU_QEMU_GENERIC:
1671 case CPU_I6400:
1672 {
1673 #define ERRCTL_PE 0x80000000
1674 #define ERRCTL_L2P 0x00800000
1675 unsigned long errctl;
1676 unsigned int l1parity_present, l2parity_present;
1677
1678 errctl = read_c0_ecc();
1679 errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1680
1681 /* probe L1 parity support */
1682 write_c0_ecc(errctl | ERRCTL_PE);
1683 back_to_back_c0_hazard();
1684 l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1685
1686 /* probe L2 parity support */
1687 write_c0_ecc(errctl|ERRCTL_L2P);
1688 back_to_back_c0_hazard();
1689 l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1690
1691 if (l1parity_present && l2parity_present) {
1692 if (l1parity)
1693 errctl |= ERRCTL_PE;
1694 if (l1parity ^ l2parity)
1695 errctl |= ERRCTL_L2P;
1696 } else if (l1parity_present) {
1697 if (l1parity)
1698 errctl |= ERRCTL_PE;
1699 } else if (l2parity_present) {
1700 if (l2parity)
1701 errctl |= ERRCTL_L2P;
1702 } else {
1703 /* No parity available */
1704 }
1705
1706 printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1707
1708 write_c0_ecc(errctl);
1709 back_to_back_c0_hazard();
1710 errctl = read_c0_ecc();
1711 printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1712
1713 if (l1parity_present)
1714 printk(KERN_INFO "Cache parity protection %sabled\n",
1715 (errctl & ERRCTL_PE) ? "en" : "dis");
1716
1717 if (l2parity_present) {
1718 if (l1parity_present && l1parity)
1719 errctl ^= ERRCTL_L2P;
1720 printk(KERN_INFO "L2 cache parity protection %sabled\n",
1721 (errctl & ERRCTL_L2P) ? "en" : "dis");
1722 }
1723 }
1724 break;
1725
1726 case CPU_5KC:
1727 case CPU_5KE:
1728 case CPU_LOONGSON1:
1729 write_c0_ecc(0x80000000);
1730 back_to_back_c0_hazard();
1731 /* Set the PE bit (bit 31) in the c0_errctl register. */
1732 printk(KERN_INFO "Cache parity protection %sabled\n",
1733 (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1734 break;
1735 case CPU_20KC:
1736 case CPU_25KF:
1737 /* Clear the DE bit (bit 16) in the c0_status register. */
1738 printk(KERN_INFO "Enable cache parity protection for "
1739 "MIPS 20KC/25KF CPUs.\n");
1740 clear_c0_status(ST0_DE);
1741 break;
1742 default:
1743 break;
1744 }
1745 }
1746
1747 asmlinkage void cache_parity_error(void)
1748 {
1749 const int field = 2 * sizeof(unsigned long);
1750 unsigned int reg_val;
1751
1752 /* For the moment, report the problem and hang. */
1753 printk("Cache error exception:\n");
1754 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1755 reg_val = read_c0_cacheerr();
1756 printk("c0_cacheerr == %08x\n", reg_val);
1757
1758 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1759 reg_val & (1<<30) ? "secondary" : "primary",
1760 reg_val & (1<<31) ? "data" : "insn");
1761 if ((cpu_has_mips_r2_r6) &&
1762 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1763 pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1764 reg_val & (1<<29) ? "ED " : "",
1765 reg_val & (1<<28) ? "ET " : "",
1766 reg_val & (1<<27) ? "ES " : "",
1767 reg_val & (1<<26) ? "EE " : "",
1768 reg_val & (1<<25) ? "EB " : "",
1769 reg_val & (1<<24) ? "EI " : "",
1770 reg_val & (1<<23) ? "E1 " : "",
1771 reg_val & (1<<22) ? "E0 " : "");
1772 } else {
1773 pr_err("Error bits: %s%s%s%s%s%s%s\n",
1774 reg_val & (1<<29) ? "ED " : "",
1775 reg_val & (1<<28) ? "ET " : "",
1776 reg_val & (1<<26) ? "EE " : "",
1777 reg_val & (1<<25) ? "EB " : "",
1778 reg_val & (1<<24) ? "EI " : "",
1779 reg_val & (1<<23) ? "E1 " : "",
1780 reg_val & (1<<22) ? "E0 " : "");
1781 }
1782 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1783
1784 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1785 if (reg_val & (1<<22))
1786 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1787
1788 if (reg_val & (1<<23))
1789 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1790 #endif
1791
1792 panic("Can't handle the cache error!");
1793 }
1794
1795 asmlinkage void do_ftlb(void)
1796 {
1797 const int field = 2 * sizeof(unsigned long);
1798 unsigned int reg_val;
1799
1800 /* For the moment, report the problem and hang. */
1801 if ((cpu_has_mips_r2_r6) &&
1802 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1803 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1804 read_c0_ecc());
1805 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1806 reg_val = read_c0_cacheerr();
1807 pr_err("c0_cacheerr == %08x\n", reg_val);
1808
1809 if ((reg_val & 0xc0000000) == 0xc0000000) {
1810 pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1811 } else {
1812 pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1813 reg_val & (1<<30) ? "secondary" : "primary",
1814 reg_val & (1<<31) ? "data" : "insn");
1815 }
1816 } else {
1817 pr_err("FTLB error exception\n");
1818 }
1819 /* Just print the cacheerr bits for now */
1820 cache_parity_error();
1821 }
1822
1823 /*
1824 * SDBBP EJTAG debug exception handler.
1825 * We skip the instruction and return to the next instruction.
1826 */
1827 void ejtag_exception_handler(struct pt_regs *regs)
1828 {
1829 const int field = 2 * sizeof(unsigned long);
1830 unsigned long depc, old_epc, old_ra;
1831 unsigned int debug;
1832
1833 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1834 depc = read_c0_depc();
1835 debug = read_c0_debug();
1836 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1837 if (debug & 0x80000000) {
1838 /*
1839 * In branch delay slot.
1840 * We cheat a little bit here and use EPC to calculate the
1841 * debug return address (DEPC). EPC is restored after the
1842 * calculation.
1843 */
1844 old_epc = regs->cp0_epc;
1845 old_ra = regs->regs[31];
1846 regs->cp0_epc = depc;
1847 compute_return_epc(regs);
1848 depc = regs->cp0_epc;
1849 regs->cp0_epc = old_epc;
1850 regs->regs[31] = old_ra;
1851 } else
1852 depc += 4;
1853 write_c0_depc(depc);
1854
1855 #if 0
1856 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1857 write_c0_debug(debug | 0x100);
1858 #endif
1859 }
1860
1861 /*
1862 * NMI exception handler.
1863 * No lock; only written during early bootup by CPU 0.
1864 */
1865 static RAW_NOTIFIER_HEAD(nmi_chain);
1866
1867 int register_nmi_notifier(struct notifier_block *nb)
1868 {
1869 return raw_notifier_chain_register(&nmi_chain, nb);
1870 }
1871
1872 void __noreturn nmi_exception_handler(struct pt_regs *regs)
1873 {
1874 char str[100];
1875
1876 nmi_enter();
1877 raw_notifier_call_chain(&nmi_chain, 0, regs);
1878 bust_spinlocks(1);
1879 snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1880 smp_processor_id(), regs->cp0_epc);
1881 regs->cp0_epc = read_c0_errorepc();
1882 die(str, regs);
1883 nmi_exit();
1884 }
1885
1886 #define VECTORSPACING 0x100 /* for EI/VI mode */
1887
1888 unsigned long ebase;
1889 unsigned long exception_handlers[32];
1890 unsigned long vi_handlers[64];
1891
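/*
 * Install the handler for exception vector n and return the previous one.
 * For the interrupt vector (n == 0) on CPUs with dedicated interrupt
 * vector support, also patch a jump to the handler at ebase + 0x200.
 */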
1892 void __init *set_except_vector(int n, void *addr)
1893 {
1894 unsigned long handler = (unsigned long) addr;
1895 unsigned long old_handler;
1896
1897 #ifdef CONFIG_CPU_MICROMIPS
1898 /*
1899 * Only the TLB handlers are cache aligned with an even
1900 * address. All other handlers are on an odd address and
1901 * require no modification. Otherwise, MIPS32 mode will
1902 * be entered when handling any TLB exceptions. That
1903 * would be bad...since we must stay in microMIPS mode.
1904 */
1905 if (!(handler & 0x1))
1906 handler |= 1;
1907 #endif
1908 old_handler = xchg(&exception_handlers[n], handler);
1909
1910 if (n == 0 && cpu_has_divec) {
1911 #ifdef CONFIG_CPU_MICROMIPS
1912 unsigned long jump_mask = ~((1 << 27) - 1);
1913 #else
1914 unsigned long jump_mask = ~((1 << 28) - 1);
1915 #endif
1916 u32 *buf = (u32 *)(ebase + 0x200);
1917 unsigned int k0 = 26;
1918 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1919 uasm_i_j(&buf, handler & ~jump_mask);
1920 uasm_i_nop(&buf);
1921 } else {
1922 UASM_i_LA(&buf, k0, handler);
1923 uasm_i_jr(&buf, k0);
1924 uasm_i_nop(&buf);
1925 }
1926 local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1927 }
1928 return (void *)old_handler;
1929 }
1930
1931 static void do_default_vi(void)
1932 {
1933 show_regs(get_irq_regs());
1934 panic("Caught unexpected vectored interrupt.");
1935 }
1936
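/*
 * Install a vectored interrupt handler for vector n using shadow register
 * set srs. With srs == 0 the common dispatch stub is copied into place
 * and the handler address patched into its lui/ori pair; otherwise a
 * direct jump to the handler is written at the vector.
 */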
1937 static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1938 {
1939 unsigned long handler;
1940 unsigned long old_handler = vi_handlers[n];
1941 int srssets = current_cpu_data.srsets;
1942 u16 *h;
1943 unsigned char *b;
1944
1945 BUG_ON(!cpu_has_veic && !cpu_has_vint);
1946
1947 if (addr == NULL) {
1948 handler = (unsigned long) do_default_vi;
1949 srs = 0;
1950 } else
1951 handler = (unsigned long) addr;
1952 vi_handlers[n] = handler;
1953
1954 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1955
1956 if (srs >= srssets)
1957 panic("Shadow register set %d not supported", srs);
1958
1959 if (cpu_has_veic) {
1960 if (board_bind_eic_interrupt)
1961 board_bind_eic_interrupt(n, srs);
1962 } else if (cpu_has_vint) {
1963 /* SRSMap is only defined if shadow sets are implemented */
1964 if (srssets > 1)
1965 change_c0_srsmap(0xf << n*4, srs << n*4);
1966 }
1967
1968 if (srs == 0) {
1969 /*
1970 * If no shadow set is selected then use the default handler
1971 * that does normal register saving and standard interrupt exit
1972 */
1973 extern char except_vec_vi, except_vec_vi_lui;
1974 extern char except_vec_vi_ori, except_vec_vi_end;
1975 extern char rollback_except_vec_vi;
1976 char *vec_start = using_rollback_handler() ?
1977 &rollback_except_vec_vi : &except_vec_vi;
1978 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1979 const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1980 const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1981 #else
1982 const int lui_offset = &except_vec_vi_lui - vec_start;
1983 const int ori_offset = &except_vec_vi_ori - vec_start;
1984 #endif
1985 const int handler_len = &except_vec_vi_end - vec_start;
1986
1987 if (handler_len > VECTORSPACING) {
1988 /*
1989 * Sigh... panicing won't help as the console
1990 * is probably not configured :(
1991 */
1992 panic("VECTORSPACING too small");
1993 }
1994
1995 set_handler(((unsigned long)b - ebase), vec_start,
1996 #ifdef CONFIG_CPU_MICROMIPS
1997 (handler_len - 1));
1998 #else
1999 handler_len);
2000 #endif
2001 h = (u16 *)(b + lui_offset);
2002 *h = (handler >> 16) & 0xffff;
2003 h = (u16 *)(b + ori_offset);
2004 *h = (handler & 0xffff);
2005 local_flush_icache_range((unsigned long)b,
2006 (unsigned long)(b+handler_len));
2007 }
2008 else {
2009 /*
2010 * In other cases jump directly to the interrupt handler. It
2011 * is the handler's responsibility to save registers if required
2012 * (eg hi/lo) and return from the exception using "eret".
2013 */
2014 u32 insn;
2015
2016 h = (u16 *)b;
2017 /* j handler */
2018 #ifdef CONFIG_CPU_MICROMIPS
2019 insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2020 #else
2021 insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2022 #endif
2023 h[0] = (insn >> 16) & 0xffff;
2024 h[1] = insn & 0xffff;
2025 h[2] = 0;
2026 h[3] = 0;
2027 local_flush_icache_range((unsigned long)b,
2028 (unsigned long)(b+8));
2029 }
2030
2031 return (void *)old_handler;
2032 }
2033
set_vi_handler(int n,vi_handler_t addr)2034 void *set_vi_handler(int n, vi_handler_t addr)
2035 {
2036 return set_vi_srs_handler(n, addr, 0);
2037 }
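
/*
 * Illustrative only: platform interrupt setup code on a VI/EIC system
 * would typically install its per-vector dispatch routines with
 * set_vi_handler(), e.g. something like
 *
 *	set_vi_handler(cp0_compare_irq, my_timer_dispatch);
 *
 * where my_timer_dispatch is a hypothetical board-specific routine; the
 * actual call sites live in the platform code, not here.
 */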

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model. Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27). Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
	back_to_back_c0_hazard();
}

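/*
 * HWRENA controls which hardware registers user mode may read with the
 * rdhwr instruction.  The low four bits cover CPUNum, SYNCI_Step, CC and
 * CCRes; bit 29 exposes the UserLocal register, which user space commonly
 * uses as the thread pointer for TLS, roughly:
 *
 *	rdhwr	$3, $29		# read UserLocal into $v1
 *
 * Booting with "noulri" on the command line keeps UserLocal hidden.
 */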
/* configure HWRENA register */
static void configure_hwrena(void)
{
	unsigned int hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= 0x0000000f;

	if (!noulri && cpu_has_userlocal)
		hwrena |= (1 << 29);

	if (hwrena)
		write_c0_hwrena(hwrena);
}

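/*
 * Program the exception vector setup.  On CPUs with vectored or external
 * interrupt controller support, EBASE is written while Status.BEV is
 * temporarily set (as the architecture expects when relocating the
 * exception base), and IntCtl.VS (the 0x3e0 field) is set to the vector
 * spacing, which enables EI/VI mode.  Setting Cause.IV makes interrupts
 * use the dedicated vector at ebase + 0x200.
 */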
static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

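/*
 * Per-CPU trap state initialisation.  The boot CPU is set up from
 * trap_init(); secondary CPUs run this during their bring-up so that
 * Status, HWRENA, the exception base and the timer/perf/FDC interrupt
 * assignments are consistent across the system, and so that the idle
 * thread is attached to init_mm.
 */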
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 these interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust that a secondary core has a sane EBASE
		 * register, so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu)
			write_c0_ebase(ebase);

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

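/*
 * set_handler() copies a handler into the exception vector area at
 * ebase + offset and flushes the icache over it.  On microMIPS kernels
 * the handler symbols carry the ISA bit (bit 0 set), so one is subtracted
 * from the address to get the actual start of the code being copied.
 */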
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
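
/*
 * Note: CKSEG1ADDR() yields the uncached kseg1 alias of the same physical
 * address, so the cache error handler installed above can run without
 * relying on the (possibly faulty) caches.
 */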

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

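/*
 * trap_init() runs once on the boot CPU: it picks or allocates the
 * exception base (ebase), calls per_cpu_trap_init(), copies the generic
 * exception stubs into place and then installs the handler for each
 * exception cause code, overriding the handle_reserved default where the
 * CPU actually implements the corresponding feature.
 */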
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

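	/*
	 * Config3.ISAOnExc selects whether exceptions are taken in
	 * microMIPS mode; make it match the ISA the kernel (and thus its
	 * exception handlers) was built for.
	 */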
	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	} else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware. Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
						      : handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(14, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception. The handlers have not been
		 * written yet. Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 * (Duh, crap, there is someone with a triple R6k machine)
		 */
		//set_except_vector(14, handle_mc);
		//set_except_vector(15, handle_ndc);
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(16, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(19, tlb_do_page_fault_0);
		set_except_vector(20, tlb_do_page_fault_0);
	}

	set_except_vector(21, handle_msa);
	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 also uses the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last */
}

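/*
 * CPU power management notifier: when a CPU comes back from a low-power
 * state (or from a failed attempt to enter one), the CP0 state programmed
 * by per_cpu_trap_init() may have been lost, so Status, HWRENA and the
 * exception vector configuration are reprogrammed and the CPU-number
 * register used by the TLB handlers is restored.
 */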
static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);