// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

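	/*
	 * An exception taken from the idle task may have interrupted an RCU
	 * extended quiescent state. Tell RCU we're entering the kernel here,
	 * and note that the exit path must balance this with rcu_irq_exit().
	 */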
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();

	mte_check_tfsr_entry();
}

/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	mte_check_tfsr_exit();

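	/*
	 * Balance the entry path: if the interrupted context had interrupts
	 * enabled, restore the hardirqs-on state for lockdep and tracing, and
	 * exit RCU if the entry path entered it on our behalf.
	 */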
	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

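/*
 * NMI entry bookkeeping: the interrupted context's lockdep hardirq state is
 * saved and hardirqs are marked off, since an NMI can fire regardless of
 * whether interrupts were enabled at the point it was taken.
 */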
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

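/*
 * Undo arm64_enter_nmi() in reverse order, restoring the lockdep hardirq
 * state that was live when the NMI was taken.
 */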
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

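/*
 * With pseudo-NMIs (IRQ priority masking) enabled, an interrupt taken while
 * regular interrupts were masked must be a pseudo-NMI and needs the stricter
 * NMI accounting; otherwise use the normal kernel-mode entry path.
 */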
asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

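/*
 * The EL1 synchronous exception handlers share a common shape: perform the
 * entry accounting, inherit the interrupted context's DAIF flags so the
 * handler runs with the same interrupt state, handle the exception, then
 * mask DAIF again before the exit accounting.
 */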
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_undefinstr(regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	bad_mode(regs, 0, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

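/*
 * Debug exceptions can be taken almost anywhere, including with interrupts
 * masked, so they get NMI-like accounting for RCU and lockdep rather than
 * the normal kernel-mode entry path.
 */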
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_ptrauth_fault(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

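/*
 * Dispatch synchronous exceptions taken from EL1 according to the exception
 * class in ESR_EL1.
 */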
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		el1_inv(regs, esr);
	}
}

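/*
 * Entry/exit accounting for exceptions taken from EL0: context tracking must
 * see the transition out of (and back into) user mode, with hardirq tracing
 * kept consistent around it.
 */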
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}

asmlinkage void noinstr exit_to_user_mode(void)
{
	mte_check_tfsr_exit();

	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

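/*
 * EL0 handlers perform the entry accounting, then restore DAIF to the usual
 * process-context state (interrupts enabled) before handling the exception;
 * exit_to_user_mode() is called separately on the return-to-user path.
 */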
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sysinstr(esr, regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

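	/*
	 * As with instruction aborts, apply BP hardening before IRQs are
	 * re-enabled if the faulting PC is a kernel address.
	 */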
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_undefinstr(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_bti(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}

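/*
 * Unlike the other EL0 handlers, DAIF is not restored here; the syscall
 * path restores it itself once it is safe to take interrupts.
 */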
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode();
	do_el0_svc(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_ptrauth_fault(regs, esr);
}

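/*
 * Dispatch synchronous exceptions taken from AArch64 EL0 according to the
 * exception class in ESR_EL1.
 */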
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

#ifdef CONFIG_COMPAT
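/*
 * Compat (AArch32) tasks can trap CP15 accesses; do_cp15instr() emulates
 * supported accesses and treats the rest as undefined instructions.
 */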
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);
	do_cp15instr(esr, regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode();
	do_el0_svc_compat(regs);
}

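/*
 * Dispatch synchronous exceptions taken from AArch32 EL0; the exception
 * classes differ from the AArch64 case (e.g. SVC32, BKPT32, CP15 traps).
 */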
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}
#endif /* CONFIG_COMPAT */