// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>

#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/debugfs.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...)	printk(KERN_INFO x)
#else
#define TM_DEBUG(x...)	do { } while (0)
#endif

static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate, there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

extern void panic_flush_kmsg_start(void)
{
	/*
	 * These steps are mostly taken from kernel/panic.c, but try to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

extern void panic_flush_kmsg_end(void)
{
	printk_safe_flush_on_panic();
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception() handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == 0x100)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);

static char *get_mmu_str(void)
{
	if (early_radix_enabled())
		return " MMU=Radix";
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return " MMU=Hash";
	return "";
}

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s%s%s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024, get_mmu_str(),
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception() handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != 0x100) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
}

static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}

static bool exception_common(int signr, struct pt_regs *regs, int code,
			     unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	show_signal_msg(signr, regs, code, addr);

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;

	/*
	 * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
	 * to capture the content, if the task gets killed.
	 */
	thread_pkey_regs_save(&current->thread);

	return true;
}

void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr);
}

/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (regs->msr & MSR_PR)
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
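	/*
	 * The windows tested below should line up with the HSRR-using
	 * vectors in the 64s exception layout (0x500 HV external/
	 * virtualization, 0x980 hypervisor decrementer, 0xe00-0xec0
	 * HDSI/HISI/emulation assist/HMI/H-doorbell/H-virt, 0xf80 HV
	 * facility unavailable), each in both its real (0x...) and
	 * virt (0x4...) incarnation.
	 */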
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
			nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
			nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
	regs->msr &= ~MSR_RI;
#endif
}

void system_reset_exception(struct pt_regs *regs)
{
	unsigned long hsrr0, hsrr1;
	bool saved_hsrrs = false;
	u8 ftrace_enabled = this_cpu_get_ftrace_enabled();

	this_cpu_set_ftrace_enabled(0);

	nmi_enter();

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2 * MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI)) {
		/* For the reason explained in die_mce, nmi_exit before die */
		nmi_exit();
		die("Unrecoverable System Reset", regs, SIGABRT);
	}

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	nmi_exit();

	this_cpu_set_ftrace_enabled(ftrace_enabled);

	/* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * -- paulus.
 */
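/*
 * For orientation, a sketch of the kind of sequence this matches (the
 * exact instructions come from the PPC32 I/O accessors in asm/io.h;
 * this listing is illustrative, not copied from there):
 *
 *	sync			# order the access vs. earlier stores
 *	lwzx	rD,0,rPort	# the actual in*() load
 *	twi	0,rD,0		# never-taken trap; forces the load to
 *	isync			# complete before execution continues
 *	nop
 *
 * The exception table entry covers this region, so a machine check on
 * the port access lands here with a usable fixup address.
 */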
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_INST_NOP)
			nip -= 2;
		else if (*nip == PPC_INST_ISYNC)
			--nip;
		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
			unsigned int rb;

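			/*
			 * In the X-form load/store encodings the paired
			 * store opcodes are the load opcodes + 128, which
			 * shows up as bit 0x100 of the instruction word;
			 * that is what the OUT/IN test below keys off.
			 */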
			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = extable_fixup(entry);
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->dsisr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR
#define REASON_PREFIXED		0
#define REASON_BOUNDARY		0

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while (0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP
#define REASON_PREFIXED		SRR1_PREFIXED
#define REASON_BOUNDARY		SRR1_BOUNDARY

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#define clear_br_trace(regs)	((regs)->msr &= ~MSR_BE)
#endif

#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)

#if defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		pr_cont("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		pr_cont("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		pr_cont("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		pr_cont("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	/*
	 * BOOK3S_64 does not call this handler as a non-maskable interrupt
	 * (it uses its own early real-mode handler to handle the MCE proper
	 * and then raises irq_work to call this handler when interrupts are
	 * enabled).
	 *
	 * This is silly. The BOOK3S_64 should just call a different function
	 * rather than expecting semantics to magically change. Something
	 * like 'non_nmi_machine_check_exception()', perhaps?
	 */
	const bool nmi = !IS_ENABLED(CONFIG_PPC_BOOK3S_64);

	if (nmi)
		nmi_enter();

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if any machine dependent calls. In theory, we would want
	 * to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	if (nmi)
		nmi_exit();

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable */
	if (!(regs->msr & MSR_RI))
		die("Unrecoverable Machine check", regs, SIGBUS);

	return;

bail:
	if (nmi)
		nmi_exit();
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16] __aligned(16), *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
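	/*
	 * The four opcodes above differ only in bits 0x0c0 (the 2-bit
	 * element-size selector read out as "sel" below) and bit 0x1
	 * (the TX bit choosing the upper VSR half). The mask 0xfc00073e
	 * clears exactly those bits, so one compare against the lxvw4x
	 * value 0x7c000618 accepts all four forms for either VSR half.
	 */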
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endian (this is rare but
	 * not impossible)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX				sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used	sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs->nip += 4;
}
#endif /* CONFIG_VSX */

void handle_hmi_exception(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

void single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(single_step_exception);

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fp_state.fpscr);

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

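	/*
	 * Transfer one byte per iteration. Bytes are packed MSB-first,
	 * four to a GPR word (shift counts 24, 16, 8, 0); after every
	 * four bytes we advance to the next register, wrapping from r31
	 * back to r0 as the string instructions specify.
	 */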
	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

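	/*
	 * Classic SWAR population count, done independently in each byte
	 * lane so that, per popcntb semantics, every byte of the result
	 * holds the number of set bits in the corresponding source byte:
	 * first sum adjacent bit pairs, then adjacent 2-bit fields, then
	 * fold each byte's two nibbles together.
	 */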
	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

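	/* isel rT,rA,rB,BC: rT = CR bit BC set ? (rA ? GPR[rA] : 0) : GPR[rB] */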
	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
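	/*
	 * mcrxr BF copies XER[0:3] (SO, OV, CA) into CR field BF and
	 * clears those XER bits; "shift" below is 4*BF extracted
	 * directly from the instruction word.
	 */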
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
#endif

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		if (kprobe_handler(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!(regs->msr & MSR_PR) &&  /* not user-mode */
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *    transition in TM states.
		 * -  A trechkpt is attempted when transactional.
		 * -  A treclaim is attempted when non transactional.
		 * -  A tend is illegally attempted.
		 * -  writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist. We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx) tm_scratch=%llx\n",
			       regs->nip, regs->msr, get_paca()->tm_scratch);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel skip down to sending a
	 * SIGILL. The subsequent cases all relate to emulating instructions
	 * which we should only do for userspace. We also do not want to enable
	 * interrupts for kernel faults because that might lead to further
	 * faults, and lose the context of the original exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0. In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(program_check_exception);

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void emulation_assist_interrupt(struct pt_regs *regs)
{
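	/*
	 * Flag the illegal-instruction reason bit in the saved MSR so
	 * that program_check_exception() takes its REASON_ILLEGAL path.
	 */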
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
NOKPROBE_SYMBOL(emulation_assist_interrupt);

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;
	unsigned long reason;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	reason = get_reason(regs);

	if (reason & REASON_BOUNDARY) {
		sig = SIGBUS;
		code = BUS_ADRALN;
		goto bad;
	}

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		/* skip over emulated instruction */
		regs->nip += inst_length(reason);
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
bad:
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
		current->comm, task_pid_nr(current), regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void stack_overflow_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	die("Kernel stack overflow", regs, SIGSEGV);

	exception_exit(prev_state);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs->msr |= MSR_TM;
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}

void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
		[FSCR_PREFIX_LG] = "PREFIX",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (TRAP(regs) == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

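	/*
	 * The interrupt cause lives in the top byte of the [H]FSCR value.
	 * FP (0) and VMX/VSX (1) have their own interrupt vectors, so a
	 * status below 2 is only meaningful for the hypervisor variant,
	 * which is what the "hv || status >= 2" filter below enforces.
	 */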
	status = value >> 56;
	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We should not have taken this interrupt in kernel */
	if (!user_mode(regs)) {
		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
			 facility, status, regs->nip);
		die("Unexpected facility unavailable exception", regs, SIGABRT);
	}

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (status == FSCR_DSCR_LG) {
		/*
		 * User is accessing the DSCR register using the problem
		 * state only SPR number (0x03) either through a mfspr or
		 * a mtspr instruction. If it is a write attempt through
		 * a mtspr, then we set the inherit bit. This also allows
		 * the user to write or read the register directly in the
		 * future by setting via the FSCR DSCR bit. But in case it
		 * is a read DSCR attempt through a mfspr instruction, we
		 * just emulate the instruction instead. This code path will
		 * always emulate all the mfspr instructions till the user
		 * has attempted at least one mtspr instruction. This way it
		 * preserves the same behaviour when the user is accessing
		 * the DSCR through privilege level only SPR number (0x11)
		 * which is emulated through illegal instruction exception.
		 * We always leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs->nip += 4;
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
		hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note: This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction. So, the transactional regs are just a
	 * copy of the checkpointed ones. But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled. So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

1870 /*
1871 * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, and
1872 * then it was overwrite by the thr->fp_state by tm_reclaim_thread().
1873 *
1874 * At this point, ck{fp,vr}_state contains the exact values we want to
1875 * recheckpoint.
1876 */
1877
1878 /* Enable FP for the task: */
1879 current->thread.load_fp = 1;
1880
1881 /*
1882 * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
1883 */
1884 tm_recheckpoint(¤t->thread);
1885 }

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
	current->thread.used_vr = 1;
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	current->thread.load_vec = 1;
	current->thread.load_fp = 1;

	tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static void performance_monitor_exception_nmi(struct pt_regs *regs)
{
	nmi_enter();

	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);

	nmi_exit();
}

static void performance_monitor_exception_async(struct pt_regs *regs)
{
	irq_enter();

	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);

	irq_exit();
}

void performance_monitor_exception(struct pt_regs *regs)
{
	/*
	 * On 64-bit, if perf interrupts hit in a local_irq_disable
	 * (soft-masked) region, we consider them as NMIs. This is required to
	 * prevent hash faults on user addresses when reading callchains (and
	 * looks better from an irq tracing perspective).
	 */
	if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
		performance_monitor_exception_nmi(regs);
	else
		performance_monitor_exception_async(regs);
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
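/*
 * Added summary, derived from the do_send_trap() calls in handle_debug()
 * below (not from architecture documentation).  Each DBSR event bit maps to
 * a debug register and the number passed as do_send_trap()'s last argument:
 *
 *	DBSR_DAC1R/DAC1W  -> DAC1, 5
 *	DBSR_DAC2R/DAC2W  -> DAC2, 6
 *	DBSR_IAC1         -> IAC1, 1
 *	DBSR_IAC2         -> IAC2, 2
 *	DBSR_IAC3         -> IAC3, 3
 *	DBSR_IAC4         -> IAC4, 4
 */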
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler.
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT.
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java mode (NJ, bit 16) bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
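	/*
	 * Background note (added, not from the original source): ESR_DLK and
	 * ESR_ILK report that a data- or instruction-cache locking operation
	 * (e.g. dcbtls/icbtls on e500) was attempted from user mode without
	 * the privilege to do so, hence the ILL_PRVOPC code below.
	 */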
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FLTUNK;
	int err;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

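	/*
	 * Explanatory note (inferred from the rewind below, not in the
	 * original source): the round exception is raised after the offending
	 * instruction has completed, so nip already points past it.  Back up
	 * so speround_handler() sees the instruction that raised the
	 * exception; the += 4 on success then re-skips it.
	 */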
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
		return;
	}
}
#endif /* CONFIG_SPE */

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
		 regs->trap, regs->nip, regs->msr);
	die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
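	/*
	 * Descriptive note (added): clearing TCR[WIE] masks further watchdog
	 * interrupts so this default handler does not refire; the system is
	 * then left to whatever final action the watchdog is configured for.
	 */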
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}
NOKPROBE_SYMBOL(kernel_bad_stack);

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }
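/*
 * Example expansion (added for clarity): WARN_EMULATED_SETUP(altivec)
 * becomes the designated initializer
 *
 *	.altivec = { .name = "altivec" }
 *
 * so each entry in ppc_emulated below carries its own field name as its
 * debugfs counter name.
 */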

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);

	debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
		debugfs_create_u32(entries[i].name, 0644, dir,
				   (u32 *)&entries[i].val.counter);

	return 0;
}
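
/*
 * Resulting debugfs layout (added note; assumes debugfs is mounted at
 * /sys/kernel/debug, under which powerpc_debugfs_root lives):
 *
 *	/sys/kernel/debug/powerpc/emulated_instructions/do_warn
 *	/sys/kernel/debug/powerpc/emulated_instructions/altivec
 *	/sys/kernel/debug/powerpc/emulated_instructions/dcbz
 *	... one u32 counter per ppc_emulated entry above.
 */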

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */