/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

DECLARE_BITMAP(system_vectors, NR_VECTORS);

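/*
 * Enable/disable IRQs to match the interrupted context: only fiddle with
 * the interrupt flag if it was set (X86_EFLAGS_IF) when the trap hit.
 */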
static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

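/*
 * A BUG()/WARN() site is a two-byte UD2 instruction in kernel text.
 * Anything below TASK_SIZE_MAX is a user address and can never be a
 * valid bug address.
 */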
__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up. die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults. See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

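	/*
	 * A zero sicode means there is no meaningful fault address to
	 * report, so send a bare signal instead of a siginfo fault.
	 */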
	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * POSIX requires the address of the faulting instruction to be provided for
 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe has moved the code out
 * of line then regs->ip points to the XOL code, which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobes map regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

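/*
 * WARN() and BUG() both compile to a UD2 plus a bug table entry describing
 * the site. report_bug() looks the address up; for a warning the saved IP
 * is advanced past the two-byte UD2 (LEN_UD2) so execution continues after
 * the WARN.
 */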
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a short encoding for 'CALL __WARN', as such
	 * handle it before exception entry to avoid recursive WARN
	 * in case exception entry is the one triggering WARNs.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry. Notwithstanding the
 * SDM's warnings about double faults being unrecoverable, returning works as
 * expected. Presumably what the SDM actually means is that the CPU may get
 * the register state wrong on entry, so returning could be a bad idea.
 *
 * Various CPU engineers have promised that double faults due to an IRET fault
 * while the stack is read-only are, in fact, recoverable.
 *
 * On x86_32, this is entered through a task gate, and regs are synthesized
 * from the TSS. Returning is, in principle, okay, but changes to regs will
 * be lost. If, for some reason, we need to return to a context with modified
 * regs, the shim code could be adjusted to synchronize the registers.
 *
 * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
 * to be read before doing anything else.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
	struct stack_info info;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault. In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now. We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
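	/*
	 * regs->sp lives on the espfix64 stack iff its top-level page
	 * table (P4D) slot is the dedicated ESPFIX entry; together with
	 * the CS/RIP check this pinpoints the faulting IRET.
	 */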
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
	    regs->cs == __KERNEL_CS &&
	    regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack. Copy it to the entry stack. This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip = p[0];
		gpregs->cs = p[1];
		gpregs->flags = p[2];
		gpregs->sp = p[3];
		gpregs->ss = p[4];
		gpregs->orig_ax = 0; /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value. This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead. Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows. For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack. Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault. With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	if (get_stack_guard_info((void *)address, &info))
		handle_stack_overflow(regs, address, &info);
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
		       X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

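/*
 * Hints about the address a kernel-mode #GP was trying to access:
 * GP_NON_CANONICAL means some byte of the operand fell outside the
 * canonical range, GP_CANONICAL means the decoded address looked sane,
 * and GP_NO_HINT means no address could be recovered.
 */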
enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address accessed by
 * the instruction and return that address to the caller. Also, try to figure
 * out whether any part of the access to that address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
				     MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
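
/*
 * Worked example for the check above (assuming 4-level paging, i.e.
 * __VIRTUAL_MASK_SHIFT == 47, so __VIRTUAL_MASK == 0x00007fffffffffff):
 * an 8-byte access at 0x00007ffffffffffc starts below ~__VIRTUAL_MASK,
 * but its last byte, 0x0000800000000003, lies above __VIRTUAL_MASK.
 * The access straddles the canonical boundary and is reported as
 * GP_NON_CANONICAL.
 */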

#define GPFSTR "general protection fault"

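/*
 * With IOPL emulation (iopl_emul == 3), a userspace CLI (opcode 0xfa) or
 * STI (opcode 0xfb) raises #GP. Pretend the one-byte instruction was a
 * NOP: warn once per task and skip over it.
 */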
static bool fixup_iopl_exception(struct pt_regs *regs)
{
	struct thread_struct *t = &current->thread;
	unsigned char byte;
	unsigned long ip;

	if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
		return false;

	if (insn_get_effective_ip(regs, &ip))
		return false;

	if (get_user(byte, (const char __user *)ip))
		return false;

	if (byte != 0xfa && byte != 0xfb)
		return false;

	if (!t->iopl_warn && printk_ratelimit()) {
		pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
		       current->comm, task_pid_nr(current), ip);
		print_vma_addr(KERN_CONT " in ", ip);
		pr_cont("\n");
		t->iopl_warn = 1;
	}

	regs->ip += 1;
	return true;
}

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
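	/*
	 * desc has room for GPFSTR, the longest hint string and an
	 * address formatted as "0x%lx" by the snprintf() calls below.
	 */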
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
NOKPROBE_SYMBOL(do_int3);

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before. If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S.
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
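	/* Carve a pt_regs frame out of the top of the task stack. */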
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user-space - don't
	 * trust it and switch to the current kernel stack.
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = this_cpu_read(cpu_current_top_of_stack);
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted. Now check whether entry
	 * happened from a safe stack. The entry stack and unknown stacks
	 * are not safe; in those cases use the fall-back stack instead.
	 */
	sp = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type > STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * the IST stack. The code below only copies pt_regs; the real switch
	 * happens in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif

asmlinkage __visible noinstr struct pt_regs *fixup_bad_iret(struct pt_regs *bad_regs)
{
	struct pt_regs tmp, *new_stack;

	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode. To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	new_stack = (struct pt_regs *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

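	/*
	 * The hardware IRET frame is five 8-byte words - RIP, CS, RFLAGS,
	 * RSP, SS - hence the 5*8 below, copied to &tmp.ip onward.
	 */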
	/* Copy the IRET target to the temporary storage. */
	__memcpy(&tmp.ip, (void *)bad_regs->sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, bad_regs, offsetof(struct pt_regs, ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(new_stack));
	return new_stack;
}
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here. If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set. (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
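	/*
	 * Note: the reserved DR6 bits read as 1, and DR6_RESERVED includes
	 * the inverted DR6.RTM bit, so XOR-ing the raw value with
	 * DR6_RESERVED below leaves every reported event as a set bit.
	 */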
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */

static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}


static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
	 * HW_BREAKPOINT_W on our stack)
	 *
	 * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception." PTRACE_BLOCKSTEP requested
		 * it for userspace, but we just took a kernel #DB, so re-set
		 * BTF.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * The kernel doesn't use INT1.
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - Kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}

static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_usermode() can invoke ptrace, schedule, access
	 * user memory, etc. This means that a recursive #DB is possible. If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not the result of PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception." Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB for bus lock can only be triggered from userspace. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
					      "simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Synchronize the FPU register state to the memory register state
	 * if necessary. This allows the exception handler to inspect it.
	 */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15). This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is not
	 * hurting and who knows which other CPUs suffer from this.
	 */
}

DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
		       X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	/* Initialize TSS before setting up traps so ISTs work */
	cpu_init_exception_handling();
	/* Setup traps as cpu_init() might #GP */
	idt_setup_traps();
	cpu_init();
}