// SPDX-License-Identifier: GPL-2.0-only
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <xen/xen.h>

#include <asm/fpu/internal.h>
#include <asm/traps.h>
#include <asm/kdebug.h>

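/*
 * Signature shared by all extable fixup handlers.  A handler is invoked
 * with the matching exception_table_entry, the register state at the time
 * of the fault, the trap number, the hardware error code and (where
 * available) the faulting address; it returns true once the exception has
 * been fixed up.
 */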
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
                             struct pt_regs *, int, unsigned long,
                             unsigned long);

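/*
 * Exception table entries store the fixup target and the handler as
 * self-relative offsets (32-bit ints on x86) rather than absolute
 * pointers: an entry whose ->fixup field lives at address A and holds the
 * value d resolves to the absolute address A + d.  These helpers perform
 * that conversion.
 */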
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
        return (unsigned long)&x->fixup + x->fixup;
}
static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
        return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}

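/*
 * Default fixup: resume execution at the fixup address without touching
 * any other register state.
 */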
__visible bool ex_handler_default(const struct exception_table_entry *fixup,
                                  struct pt_regs *regs, int trapnr,
                                  unsigned long error_code,
                                  unsigned long fault_addr)
{
        regs->ip = ex_fixup_addr(fixup);
        return true;
}
EXPORT_SYMBOL(ex_handler_default);

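/*
 * Like the default fixup, but additionally hand the trap number back to
 * the fixup code in the AX register so the caller can tell which
 * exception was taken.
 */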
__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
                                struct pt_regs *regs, int trapnr,
                                unsigned long error_code,
                                unsigned long fault_addr)
{
        regs->ip = ex_fixup_addr(fixup);
        regs->ax = trapnr;
        return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fault);

/*
 * Handler for UD0 exception following a failed test against the
 * result of a refcount inc/dec/add/sub.
 */
__visible bool ex_handler_refcount(const struct exception_table_entry *fixup,
                                   struct pt_regs *regs, int trapnr,
                                   unsigned long error_code,
                                   unsigned long fault_addr)
{
        /* First unconditionally saturate the refcount. */
        *(int *)regs->cx = INT_MIN / 2;

        /*
         * Strictly speaking, this reports the fixup destination, not
         * the fault location, and not the actually overflowing
         * instruction, which is the instruction before the "js", but
         * since that instruction could be a variety of lengths, just
         * report the location after the overflow, which should be close
         * enough for finding the overflow, as it's at least back in
         * the function, having returned from .text.unlikely.
         */
        regs->ip = ex_fixup_addr(fixup);

        /*
         * This function has been called because either a negative refcount
         * value was seen by any of the refcount functions, or a zero
         * refcount value was seen by refcount_dec().
         *
         * If we crossed from INT_MAX to INT_MIN, OF (Overflow Flag: result
         * wrapped around) will be set. Additionally, seeing the refcount
         * reach 0 will set ZF (Zero Flag: result was zero). In each of
         * these cases we want a report, since it's a boundary condition.
         * The SF case is not reported since it indicates post-boundary
         * manipulations below zero or above INT_MAX. And if none of the
         * flags are set, something has gone very wrong, so report it.
         */
        if (regs->flags & (X86_EFLAGS_OF | X86_EFLAGS_ZF)) {
                bool zero = regs->flags & X86_EFLAGS_ZF;

                refcount_error_report(regs, zero ? "hit zero" : "overflow");
        } else if ((regs->flags & X86_EFLAGS_SF) == 0) {
                /* Report if none of OF, ZF, nor SF are set. */
                refcount_error_report(regs, "unexpected saturation");
        }

        return true;
}
EXPORT_SYMBOL(ex_handler_refcount);

/*
 * Handler for when we fail to restore a task's FPU state. We should never get
 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
 * should always be valid. However, past bugs have allowed userspace to set
 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
 * These caused XRSTOR to fail when switching to the task, leaking the FPU
 * registers of the task previously executing on the CPU. Mitigate this class
 * of vulnerability by restoring from the initial state (essentially, zeroing
 * out all the FPU registers) if we can't restore from the task's FPU state.
 */
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
                                    struct pt_regs *regs, int trapnr,
                                    unsigned long error_code,
                                    unsigned long fault_addr)
{
        regs->ip = ex_fixup_addr(fixup);

        WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
                  (void *)instruction_pointer(regs));

        __copy_kernel_to_fpregs(&init_fpstate, -1);
        return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);

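/*
 * Fixup for faults taken in user-access code regions.  A #GP here most
 * likely means a non-canonical address was handed to a user access
 * primitive, so warn once before jumping to the fixup address.
 */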
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
                                  struct pt_regs *regs, int trapnr,
                                  unsigned long error_code,
                                  unsigned long fault_addr)
{
        WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
        regs->ip = ex_fixup_addr(fixup);
        return true;
}
EXPORT_SYMBOL(ex_handler_uaccess);

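/*
 * Legacy uaccess fixup: record the failure in current->thread.uaccess_err
 * so that callers which check it after the access (the uaccess_try /
 * uaccess_catch machinery) can see it, then resume at the fixup address.
 */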
__visible bool ex_handler_ext(const struct exception_table_entry *fixup,
                              struct pt_regs *regs, int trapnr,
                              unsigned long error_code,
                              unsigned long fault_addr)
{
        /* Special hack for uaccess_err */
        current->thread.uaccess_err = 1;
        regs->ip = ex_fixup_addr(fixup);
        return true;
}
EXPORT_SYMBOL(ex_handler_ext);

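/*
 * Fixup for a #GP raised by an RDMSR issued without a "safe" wrapper.
 * Warn (once) with the MSR number and call site, then make the read
 * appear to have succeeded and returned 0.
 *
 * Illustrative sketch (not taken from this file) of how such an access is
 * typically wired to this handler, assuming the _ASM_EXTABLE_HANDLE()
 * annotation from <asm/asm.h>:
 *
 *      asm volatile("1: rdmsr\n"
 *                   "2:\n"
 *                   _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
 *                   : "=a" (low), "=d" (high) : "c" (msr));
 */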
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
                                       struct pt_regs *regs, int trapnr,
                                       unsigned long error_code,
                                       unsigned long fault_addr)
{
        if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
                         (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
                show_stack_regs(regs);

        /* Pretend that the read succeeded and returned 0. */
        regs->ip = ex_fixup_addr(fixup);
        regs->ax = 0;
        regs->dx = 0;
        return true;
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);

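/*
 * Fixup for a #GP raised by an unchecked WRMSR: warn (once) with the MSR
 * number, the value that was being written and the call site, then
 * pretend the write succeeded and continue at the fixup address.
 */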
__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
                                       struct pt_regs *regs, int trapnr,
                                       unsigned long error_code,
                                       unsigned long fault_addr)
{
        if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
                         (unsigned int)regs->cx, (unsigned int)regs->dx,
                         (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
                show_stack_regs(regs);

        /* Pretend that the write succeeded. */
        regs->ip = ex_fixup_addr(fixup);
        return true;
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);

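/*
 * Fixup used when loading the FS selector faults: put FS back into a sane
 * state before resuming.  On CPUs with X86_BUG_NULL_SEG, writing a null
 * selector does not clear the cached segment base, hence the extra
 * __USER_DS load before the null selector write.
 */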
__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
                                   struct pt_regs *regs, int trapnr,
                                   unsigned long error_code,
                                   unsigned long fault_addr)
{
        if (static_cpu_has(X86_BUG_NULL_SEG))
                asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
        asm volatile ("mov %0, %%fs" : : "rm" (0));
        return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);

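/*
 * Report whether the exception table entry covering @ip (if any) uses
 * ex_handler_fault(), i.e. whether the fixup expects the trap number back.
 */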
__visible bool ex_has_fault_handler(unsigned long ip)
{
        const struct exception_table_entry *e;
        ex_handler_t handler;

        e = search_exception_tables(ip);
        if (!e)
                return false;
        handler = ex_fixup_handler(e);

        return handler == ex_handler_fault;
}

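/*
 * Main entry point from the trap handlers: look up regs->ip in the
 * exception tables and, if an entry is found, invoke its handler.
 * Returns nonzero when the exception has been fixed up.
 */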
__nocfi
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
                    unsigned long fault_addr)
{
        const struct exception_table_entry *e;
        ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
        if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
                extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
                extern u32 pnp_bios_is_utter_crap;
                pnp_bios_is_utter_crap = 1;
                printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
                __asm__ volatile(
                        "movl %0, %%esp\n\t"
                        "jmp *%1\n\t"
                        : : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
                panic("do_trap: can't hit this");
        }
#endif

        e = search_exception_tables(regs->ip);
        if (!e)
                return 0;

        handler = ex_fixup_handler(e);
        return handler(e, regs, trapnr, error_code, fault_addr);
}

extern unsigned int early_recursion_flag;

/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
        /* Ignore early NMIs. */
        if (trapnr == X86_TRAP_NMI)
                return;

        if (early_recursion_flag > 2)
                goto halt_loop;

        /*
         * Old CPUs leave the high bits of CS on the stack
         * undefined. I'm not sure which CPUs do this, but at least
         * the 486 DX works this way.
         * Xen pv domains are not using the default __KERNEL_CS.
         */
        if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
                goto fail;

        /*
         * The full exception fixup machinery is available as soon as
         * the early IDT is loaded. This means that it is the
         * responsibility of extable users to either function correctly
         * when handlers are invoked early or to simply avoid causing
         * exceptions before they're ready to handle them.
         *
         * This is better than filtering which handlers can be used,
         * because refusing to call a handler here is guaranteed to
         * result in a hard-to-debug panic.
         *
         * Keep in mind that not all vectors actually get here. Early
         * page faults, for example, are special.
         */
        if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
                return;

        if (fixup_bug(regs, trapnr))
                return;

fail:
        early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
                     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
                     regs->orig_ax, read_cr2());

        show_regs(regs);

halt_loop:
        while (true)
                halt();
}