// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/ptrace.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/sysreg.h>

/*
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 *
 * NOTE: the ordering (lockdep first, then RCU, tracing last) mirrors
 * irqentry_enter() and must be preserved.
 */
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	/* Common case: assume RCU was already watching this CPU. */
	regs->exit_rcu = false;

	/*
	 * If we interrupted the idle task, RCU may not have been watching;
	 * inform it explicitly and record in regs->exit_rcu that
	 * exit_to_kernel_mode() must balance this with rcu_irq_exit().
	 * This bookkeeping is skipped entirely under CONFIG_TINY_RCU.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	/*
	 * RCU is watching; only give it the opportunity to check whether
	 * the tick needs re-enabling.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}
41 
/*
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	/* Callers must have masked interrupts before invoking this. */
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		/*
		 * Returning to a context with interrupts enabled. If
		 * enter_from_kernel_mode() set exit_rcu (interrupted idle),
		 * balance its rcu_irq_enter() here, sandwiched between the
		 * lockdep/tracing "IRQs on" notifications.
		 */
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		/*
		 * Interrupts stay masked in the interrupted context; only
		 * the RCU bookkeeping (if any) needs balancing.
		 */
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}
65 
/*
 * Entry accounting for an NMI (or pseudo-NMI) taken at any point, even
 * with the regular hardirq state unknown.
 */
void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	/*
	 * Snapshot lockdep's tracked hardirq state so arm64_exit_nmi()
	 * can restore it on the way out.
	 */
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	/* Tracing only after RCU is watching. */
	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}
78 
/*
 * Exit accounting for an NMI: undoes arm64_enter_nmi() in reverse order,
 * restoring lockdep's hardirq state to the snapshot taken on entry.
 */
void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	/*
	 * The "IRQs on" restore is split: prepare while RCU is still
	 * watching, final lockdep notification after rcu_nmi_exit().
	 */
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}
95 
enter_el1_irq_or_nmi(struct pt_regs * regs)96 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
97 {
98 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
99 		arm64_enter_nmi(regs);
100 	else
101 		enter_from_kernel_mode(regs);
102 }
103 
exit_el1_irq_or_nmi(struct pt_regs * regs)104 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
105 {
106 	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
107 		arm64_exit_nmi(regs);
108 	else
109 		exit_to_kernel_mode(regs);
110 }
111 
/* Data/instruction abort taken from EL1. */
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	/* Read FAR_EL1 up front, before another exception could clobber it. */
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);	/* unmask per the interrupted context */
	far = untagged_addr(far);	/* strip pointer tag bits */
	do_mem_abort(far, esr, regs);
	local_daif_mask();		/* re-mask before exit accounting */
	exit_to_kernel_mode(regs);
}
123 
/* PC alignment fault taken from EL1. */
static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	/* Read FAR_EL1 up front, before another exception could clobber it. */
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);	/* unmask per the interrupted context */
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();		/* re-mask before exit accounting */
	exit_to_kernel_mode(regs);
}
134 
/* Undefined instruction (or trapped SYS64 access) taken from EL1. */
static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);	/* unmask per the interrupted context */
	do_el1_undef(regs, esr);
	local_daif_mask();		/* re-mask before exit accounting */
	exit_to_kernel_mode(regs);
}
143 
/* Branch Target Identification fault taken from EL1. */
static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);	/* unmask per the interrupted context */
	do_el1_bti(regs, esr);
	local_daif_mask();		/* re-mask before exit accounting */
	exit_to_kernel_mode(regs);
}
152 
/* Catch-all for unhandled EL1 synchronous exception classes. */
static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);	/* unmask per the interrupted context */
	bad_mode(regs, 0, esr);
	local_daif_mask();		/* re-mask before exit accounting */
	exit_to_kernel_mode(regs);
}
161 
/*
 * Entry accounting for a debug exception from EL1. Uses NMI-style RCU
 * accounting (rcu_nmi_enter) but, unlike arm64_enter_nmi(), without the
 * __nmi_enter()/ftrace nesting bookkeeping.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	/* Snapshot lockdep's hardirq state for arm64_exit_el1_dbg(). */
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}
171 
/*
 * Exit accounting for a debug exception from EL1: undoes
 * arm64_enter_el1_dbg(), restoring the lockdep snapshot. The "IRQs on"
 * restore is split around rcu_nmi_exit(), as in arm64_exit_nmi().
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}
185 
/*
 * Debug exception (breakpoint/watchpoint/single-step/BRK) taken from EL1.
 * Handled with NMI-like accounting and with interrupts left masked.
 */
static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* FAR_EL1 is meaningful for watchpoints; otherwise UNKNOWN (see el0_dbg). */
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}
194 
/* Pointer authentication (FPAC) failure taken from EL1. */
static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);	/* unmask per the interrupted context */
	do_el1_fpac(regs, esr);
	local_daif_mask();		/* re-mask before exit accounting */
	exit_to_kernel_mode(regs);
}
203 
/*
 * Dispatch a synchronous exception taken from EL1, keyed on the ESR_EL1
 * exception class. Called from the EL1 sync vector with exceptions masked.
 */
asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		/* Anything else is fatal at EL1. */
		el1_inv(regs, esr);
	}
}
240 
/*
 * Accounting for entry from EL0: inform lockdep, leave the "user" context
 * tracking state (with IRQs off), then notify tracing.
 */
asmlinkage void noinstr enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	/* We must have been in CONTEXT_USER when the exception was taken. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
}
248 
/*
 * Accounting for return to EL0: tracing/lockdep prepare, re-enter the
 * "user" context tracking state, then the final lockdep notification.
 */
asmlinkage void noinstr exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
256 
/* Data abort taken from EL0. */
static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	/* Read FAR_EL1 up front, before another exception could clobber it. */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	far = untagged_addr(far);		/* strip pointer tag bits */
	do_mem_abort(far, esr, regs);
}
266 
/* Instruction abort taken from EL0. */
static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_mem_abort(far, esr, regs);
}
283 
/* FP/ASIMD access trap taken from EL0. */
static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_fpsimd_acc(esr, regs);
}
290 
/* SVE access trap taken from EL0. */
static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_sve_acc(esr, regs);
}
297 
/* FP/ASIMD exception taken from EL0. */
static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_fpsimd_exc(esr, regs);
}
304 
/* Trapped system-register access (or WFx) from EL0. */
static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_el0_sys(esr, regs);
}
311 
/* PC alignment fault taken from EL0. */
static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/* BP hardening for kernel addresses, before IRQs are re-enabled. */
	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_sp_pc_abort(far, esr, regs);
}
323 
/* SP alignment fault taken from EL0; the faulting address is user SP. */
static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_sp_pc_abort(regs->sp, esr, regs);
}
330 
/* Undefined instruction taken from EL0. */
static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_el0_undef(regs, esr);
}
337 
/* Branch Target Identification fault taken from EL0. */
static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_el0_bti(regs);
}
344 
/* Catch-all for unhandled EL0 synchronous exception classes. */
static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	bad_el0_sync(regs, 0, esr);
}
351 
/* Debug exception (breakpoint/watchpoint/single-step/BRK) taken from EL0. */
static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode();
	/*
	 * Unlike the other el0_* handlers, the handler runs with IRQs still
	 * masked, and DAIF is restored (IRQs kept off) only afterwards.
	 */
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}
361 
/*
 * SVC (syscall) taken from 64-bit EL0. No local_daif_restore() here —
 * presumably DAIF handling is done inside do_el0_svc(); not visible in
 * this file.
 */
static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode();
	do_el0_svc(regs);
}
367 
/* Pointer authentication (FPAC) failure taken from EL0. */
static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_el0_fpac(regs, esr);
}
374 
/*
 * Dispatch a synchronous exception taken from 64-bit EL0, keyed on the
 * ESR_EL1 exception class. Called from the EL0 sync vector.
 */
asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		/* Unrecognised class: deliver a fatal signal via el0_inv(). */
		el0_inv(regs, esr);
	}
}
427 
428 #ifdef CONFIG_COMPAT
/* Trapped CP15 (AArch32 system register) access from compat EL0. */
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode();
	local_daif_restore(DAIF_PROCCTX);	/* IRQs on for the handler */
	do_el0_cp15(esr, regs);
}
435 
/*
 * SVC (syscall) taken from 32-bit compat EL0. As with el0_svc(), DAIF
 * handling is presumably done inside do_el0_svc_compat(); not visible here.
 */
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode();
	do_el0_svc_compat(regs);
}
441 
/*
 * Dispatch a synchronous exception taken from 32-bit compat EL0, keyed on
 * the ESR_EL1 exception class.
 */
asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	/* Trapped CP14 accesses are treated as undefined. */
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		/* Unrecognised class: deliver a fatal signal via el0_inv(). */
		el0_inv(regs, esr);
	}
}
485 #endif /* CONFIG_COMPAT */
486