/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/trace/mpx.h>
#include <asm/mpx.h>
#include <asm/vm86.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

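/*
 * Bitmap of IDT vectors that are already claimed (CPU exceptions and
 * system vectors), consulted when vectors are allocated for device
 * interrupts.
 */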
DECLARE_BITMAP(used_vectors, NR_VECTORS);

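/*
 * Re-enable/disable interrupts only if they were enabled in the
 * interrupted context, as recorded in the saved EFLAGS.IF bit.
 */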
static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

/*
 * In IST context, we explicitly disable preemption.  This serves two
 * purposes: it makes it much less likely that we would accidentally
 * schedule in IST context and it will force a warning if we somehow
 * manage to schedule by accident.
 */
void ist_enter(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	} else {
		/*
		 * We might have interrupted pretty much anything.  In
		 * fact, if we're a machine check, we can even interrupt
		 * NMI processing.  We don't want in_nmi() to return true,
		 * but we need to notify RCU.
		 */
		rcu_nmi_enter();
	}

	preempt_disable();

	/* This code is a bit fragile.  Test it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
}

void ist_exit(struct pt_regs *regs)
{
	preempt_enable_no_resched();

	if (!user_mode(regs))
		rcu_nmi_exit();
}

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON(!on_thread_stack());

	preempt_enable_no_resched();
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_disable();
}

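/*
 * Check whether a kernel address holds a BUG/WARN trap instruction:
 * read the two opcode bytes at @addr and compare them against the
 * UD0 and UD2 encodings.
 */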
int is_valid_bugaddr(unsigned long addr)
{
	unsigned short ud;

	if (addr < TASK_SIZE_MAX)
		return 0;

	if (probe_kernel_address((unsigned short *)addr, ud))
		return 0;

	return ud == INSN_UD0 || ud == INSN_UD2;
}

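/*
 * Handle #UD faults raised by BUG()/WARN() instructions.  For a WARN,
 * the report is printed and the faulting ud2 is skipped so execution
 * continues; returns 1 if the trap was consumed here, 0 to continue
 * normal #UD handling.
 */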
int fixup_bug(struct pt_regs *regs, int trapnr)
{
	if (trapnr != X86_TRAP_UD)
		return 0;

	switch (report_bug(regs->ip, regs)) {
	case BUG_TRAP_TYPE_NONE:
	case BUG_TRAP_TYPE_BUG:
		break;

	case BUG_TRAP_TYPE_WARN:
		regs->ip += LEN_UD2;
		return 1;
	}

	return 0;
}

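/*
 * Common first-stage trap handling.  Returns 0 if the trap was handled
 * completely (vm86 or kernel exception fixup) and -1 if the caller
 * should go on to deliver a signal.  Unfixable kernel-mode faults
 * die() here and do not return.
 */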
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On NMI (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	return -1;
}

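/*
 * Build the siginfo for a trap that has a meaningful si_code/si_addr
 * (divide error, invalid opcode, alignment check); for every other
 * trap fall back to SEND_SIG_PRIV so kernel-internal info is used.
 */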
static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/*
	 * WARN*()s end up here; fix them up before we call the
	 * notifier chain.
	 */
	if (!user_mode(regs) && fixup_bug(regs, trapnr))
		return;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

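/*
 * For example, DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
 * expands to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error",
 *			      X86_TRAP_DE, SIGFPE);
 *	}
 */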
DO_ERROR(X86_TRAP_DE,     SIGFPE,  "divide error",		divide_error)
DO_ERROR(X86_TRAP_OF,     SIGSEGV, "overflow",			overflow)
DO_ERROR(X86_TRAP_UD,     SIGILL,  "invalid opcode",		invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE,  "coprocessor segment overrun",coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS,     SIGSEGV, "invalid TSS",		invalid_TSS)
DO_ERROR(X86_TRAP_NP,     SIGBUS,  "segment not present",	segment_not_present)
DO_ERROR(X86_TRAP_SS,     SIGBUS,  "stack segment",		stack_segment)
DO_ERROR(X86_TRAP_AC,     SIGBUS,  "alignment check",		alignment_check)

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic(message);
}
#endif

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
#ifdef CONFIG_VMAP_STACK
	unsigned long cr2;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
		gpregs->orig_ax = 0;  /* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails due to a bad descriptor
	 * causing #GP and we hit this condition while CR2 coincidentally
	 * points to the stack guard page, we'll think we overflowed the
	 * stack.  Given that we're going to panic one way or another
	 * if this happens, this isn't necessarily worth fixing.
	 *
	 * If necessary, we could improve the test by only diagnosing
	 * a stack overflow if the saved RSP points within 47 bytes of
	 * the bottom of the stack: if RSP == tsk_stack + 48 and we
	 * take an exception, the stack is already aligned and there
	 * will be enough room for SS, RSP, RFLAGS, CS, RIP, and a
	 * possible error code, so a stack overflow would *not* double
	 * fault.  With any less space left, exception delivery could
	 * fail, and, as a practical matter, we've overflowed the
	 * stack even if the actual trigger for the double fault was
	 * something else.
	 */
	cr2 = read_cr2();
	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

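/*
 * #BR: raised by the legacy BOUND instruction or, on MPX-capable CPUs,
 * by a bounds violation.  MPX faults are decoded via BNDSTATUS below so
 * that userspace can receive a siginfo describing the faulting access.
 */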
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;
	siginfo_t *info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * A NULL here means that it is in its 'init state' (all
	 * zeros), which indicates MPX was not responsible for the
	 * exception.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);
	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		return;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
			printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);

dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	/*
	 * Use ist_enter despite the fact that we don't use an IST stack.
	 * We can be called from a kprobe in non-CONTEXT_KERNEL kernel
	 * mode or even during context tracking state changes.
	 *
	 * This means that we can't schedule.  That's okay.
	 */
	ist_enter(regs);
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	cond_local_irq_disable(regs);

exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

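/*
 * Stack layout as seen by fixup_bad_iret(): the return address pushed
 * by the call from error_entry in entry_64.S, followed by the saved
 * register state.
 */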
struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack *new_stack =
		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	ist_enter(regs);

	get_debugreg(dr6, 6);
	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	set_debugreg(0, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
		     is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
		/*
		 * else we might have gotten a single-step trap and hit a
		 * watchpoint at the same time, in which case we should fall
		 * through and handle the watchpoint.
		 */
	}

	/*
	 * If dr6 tells us nothing about the origin of this trap, then
	 * it's very likely the result of an icebp/int01 trap.
	 * User wants a sigtrap for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
							SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
					X86_TRAP_DB);
		cond_local_irq_disable(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER single-stepping.
		 * This should be unreachable now.  If we survive for a while
		 * without anyone hitting this warning, we'll turn this into
		 * an oops.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour.
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr))
			return;

		task->thread.error_code = error_code;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, error_code,
					trapnr, SIGFPE) != NOTIFY_STOP)
			die(str, regs, error_code);
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr	= trapnr;
	task->thread.error_code = error_code;
	info.si_signo		= SIGFPE;
	info.si_errno		= 0;
	info.si_addr		= (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

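/*
 * #MF (x87 floating-point error) and #XF (SIMD floating-point
 * exception) both funnel into math_error(), which decodes the saved
 * FPU state into an appropriate SIGFPE si_code.
 */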
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}

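/*
 * Vector 15 is reserved; some legacy CPUs could reportedly deliver
 * spurious exceptions here.  There is nothing to do beyond
 * conditionally re-enabling interrupts.
 */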
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}

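/*
 * #NM: device not available.  If math emulation is configured and
 * there is no FPU, hand the instruction to the emulator; otherwise the
 * kernel does not expect CR0.TS to be set, so a stray #NM indicates a
 * bug and we either clear TS and continue or kill the task.
 */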
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	unsigned long cr0;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif

	/* This should not happen. */
	cr0 = read_cr0();
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, error_code);
	}
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
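/*
 * X86_TRAP_IRET (32-bit only): a kernel IRET back to user mode
 * faulted; the offending task gets SIGILL with ILL_BADSTK.
 */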
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	idt_setup_traps();

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
		    PAGE_KERNEL_RO);
	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();

	x86_init.irqs.trap_init();

	idt_setup_debugidt_traps();
}