// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

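/*
 * NMI entry/exit accounting. The lockdep hardirq state of the interrupted
 * context is stashed in pt_regs so that arm64_exit_nmi() can restore it
 * faithfully, even when NMIs nest.
 */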
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

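/*
 * Undo arm64_enter_nmi() in reverse order, restoring the hardirq state that
 * lockdep saw before the NMI was taken.
 */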
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

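/*
 * With pseudo-NMI support, an IRQ taken while the interrupted context had
 * interrupts masked can only be a pseudo-NMI, so it takes the NMI accounting
 * paths; everything else takes the regular kernel-mode paths.
 */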
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

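/*
 * The EL1 handlers below share a common shape: read any fault registers
 * (e.g. FAR_EL1) while still in the noinstr region, do the kernel-mode entry
 * accounting, inherit the interrupted context's DAIF bits, handle the
 * exception, then re-mask DAIF before the exit accounting.
 */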
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

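/*
 * Debug exceptions get NMI-like treatment: save the lockdep hardirq state
 * and use the RCU NMI hooks, since they can be taken in contexts where the
 * regular kernel-mode accounting would be unsafe.
 */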
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

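/*
 * Note that DAIF is not inherited here: the debug handler runs with
 * exceptions masked throughout.
 */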
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

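/*
 * Dispatch an EL1 synchronous exception on the exception class (EC) field of
 * ESR_EL1.
 */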
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}

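/*
 * All exceptions from EL0 leave user mode: tell lockdep and context tracking
 * before any instrumentable code can run.
 */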
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

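/*
 * Mirror of enter_from_user_mode(): re-enter user context tracking with IRQs
 * still masked; the final unmask happens when the exception returns.
 */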
asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

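/*
 * As at EL1, the EL0 handlers read FAR_EL1 while still in the noinstr
 * region, before anything that could fault gets a chance to clobber it.
 */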
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	far = untagged_addr(far);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

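/*
 * Note there is no local_daif_restore() here: for syscalls, interrupt
 * unmasking is expected to happen later, within the do_el0_svc() path.
 */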
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}

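/*
 * Dispatch an EL0 synchronous exception on the exception class (EC) field of
 * ESR_EL1.
 */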
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode();
	do_el0_svc_compat(regs);
}

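/*
 * Dispatch a synchronous exception from an AArch32 (compat) task. Most
 * classes share the AArch64 handlers above; the CP14/CP15 coprocessor
 * accesses and 32-bit SVC/BKPT classes are compat-specific.
 */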
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */