// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/kasan.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

#include <trace/hooks/traps.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
	regs->exit_rcu = false;

	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		trace_hardirqs_off_finish();

		regs->exit_rcu = true;
		return;
	}

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
}

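/*
 * In addition to the generic entry accounting, kernel-mode entry checks for
 * asynchronous MTE tag check faults and disables Tag Check Override (TCO)
 * for the current task.
 */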
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
	__enter_from_kernel_mode(regs);
	mte_check_tfsr_entry();
	mte_disable_tco_entry(current);
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	if (interrupts_enabled(regs)) {
		if (regs->exit_rcu) {
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		trace_hardirqs_on();
	} else {
		if (regs->exit_rcu)
			rcu_irq_exit();
	}
}

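/* Check for asynchronous MTE tag check faults before unwinding entry state. */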
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
	mte_check_tfsr_exit();
	__exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
	trace_hardirqs_off_finish();
	mte_disable_tco_entry(current);
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	__enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	user_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
	unsigned long flags;

	local_daif_mask();

	flags = READ_ONCE(current_thread_info()->flags);
	if (unlikely(flags & _TIF_WORK_MASK))
		do_notify_resume(regs, flags);
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
	prepare_exit_to_user_mode(regs);
	mte_check_tfsr_exit();
	__exit_to_user_mode();
}

asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
	exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	__nmi_enter();
	lockdep_hardirqs_off(CALLER_ADDR0);
	lockdep_hardirq_enter();
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
	ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	ftrace_nmi_exit();
	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	rcu_nmi_exit();
	lockdep_hardirq_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
	__nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

	lockdep_hardirqs_off(CALLER_ADDR0);
	rcu_nmi_enter();

	trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
	bool restore = regs->lockdep_hardirqs;

	if (restore) {
		trace_hardirqs_on_prepare();
		lockdep_hardirqs_on_prepare();
	}

	rcu_nmi_exit();
	if (restore)
		lockdep_hardirqs_on(CALLER_ADDR0);
}

static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_enter_nmi(regs);
	else
		enter_from_kernel_mode(regs);
}

static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
		arm64_exit_nmi(regs);
	else
		exit_to_kernel_mode(regs);
}

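/*
 * Called on return from an EL1 interrupt when CONFIG_PREEMPTION is enabled
 * and the preempt count is zero.
 */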
static void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
	 * priority masking is used the GIC irqchip driver will clear DAIF.IF
	 * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
	 * DAIF we must have handled an NMI, so skip preemption.
	 */
	if (system_uses_irq_prio_masking() && read_sysreg(daif))
		return;

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

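/*
 * Run the handler on the per-CPU IRQ stack when the exception was taken while
 * on the task stack; otherwise invoke it directly on the current stack.
 */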
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);
}

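/*
 * Root IRQ/FIQ dispatchers, installed by the irqchip driver via
 * set_handle_irq() and set_handle_fiq().
 */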
extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
				      unsigned long esr)
{
	arm64_enter_nmi(regs);

	console_verbose();

	pr_crit("Unhandled %s exception on CPU%d, ESR 0x%016lx -- %s\n",
		vector, smp_processor_id(), esr,
		esr_get_class_string(esr));

	trace_android_rvh_panic_unhandled(regs, vector, esr);
	__show_regs(regs);
	panic("Unhandled exception");
}

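/*
 * Generate a handler for an exception vector that is never expected to fire;
 * it reports the vector and the current ESR_EL1 value, then panics.
 */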
#define UNHANDLED(el, regsize, vector)							\
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)	\
{											\
	const char *desc = #regsize "-bit " #el " " #vector;				\
	__panic_unhandled(regs, desc, read_sysreg(esr_el1));				\
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static void cortex_a76_erratum_1463225_svc_handler(void)
{
	u32 reg, val;

	if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
		return;

	if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
		return;

	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
	reg = read_sysreg(mdscr_el1);
	val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
	write_sysreg(val, mdscr_el1);
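	/* Unmask debug exceptions (clear PSTATE.D) so the step exception can be taken. */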
	asm volatile("msr daifclr, #8");
	isb();

	/* We will have taken a single-step exception by this point */

	write_sysreg(reg, mdscr_el1);
	__this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static __always_inline bool
cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
		return false;

	/*
	 * We've taken a dummy step exception from the kernel to ensure
	 * that interrupts are re-enabled on the syscall path. Return back
	 * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
	 * masked so that we can safely restore the mdscr and get on with
	 * handling the syscall.
	 */
	regs->pstate |= PSR_D_BIT;
	return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
	return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_mem_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_sp_pc_abort(far, esr, regs);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_undef(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_bti(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_bti(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_el1_dbg(regs);
	if (!cortex_a76_erratum_1463225_debug_handler(regs))
		do_debug_exception(far, esr, regs);
	arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_kernel_mode(regs);
	local_daif_inherit(regs);
	do_el1_fpac(regs, esr);
	local_daif_mask();
	exit_to_kernel_mode(regs);
}

asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_DABT_CUR:
	case ESR_ELx_EC_IABT_CUR:
		el1_abort(regs, esr);
		break;
	/*
	 * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
	 * recursive exception when trying to push the initial pt_regs.
	 */
	case ESR_ELx_EC_PC_ALIGN:
		el1_pc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_UNKNOWN:
		el1_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el1_bti(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_CUR:
	case ESR_ELx_EC_SOFTSTP_CUR:
	case ESR_ELx_EC_WATCHPT_CUR:
	case ESR_ELx_EC_BRK64:
		el1_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el1_fpac(regs, esr);
		break;
	default:
		__panic_unhandled(regs, "64-bit el1h sync", esr);
	}
}

static void noinstr el1_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
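	/* Keep IRQ/FIQ masked while unmasking debug and SError exceptions. */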
	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

	enter_el1_irq_or_nmi(regs);
	do_interrupt_handler(regs, handler);

	/*
	 * Note: thread_info::preempt_count includes both thread_info::count
	 * and thread_info::need_resched, and is not equivalent to
	 * preempt_count().
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    READ_ONCE(current_thread_info()->preempt_count) == 0)
		arm64_preempt_schedule_irq();

	exit_el1_irq_or_nmi(regs);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
	el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	/*
	 * We've taken an instruction abort from userspace and not yet
	 * re-enabled IRQs. If the address is a kernel address, apply
	 * BP hardening prior to enabling IRQs and pre-emption.
	 */
	if (!is_ttbr0_addr(far))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_mem_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sve_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sme_acc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sme_acc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_fpsimd_exc(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_sys(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
	unsigned long far = read_sysreg(far_el1);

	if (!is_ttbr0_addr(instruction_pointer(regs)))
		arm64_apply_bp_hardening();

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(far, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_sp_pc_abort(regs->sp, esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_undef(regs, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_bti(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	bad_el0_sync(regs, 0, esr);
	exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
	/* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
	unsigned long far = read_sysreg(far_el1);

	enter_from_user_mode(regs);
	do_debug_exception(far, esr, regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc(regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_fpac(regs, esr);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC64:
		el0_svc(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_SVE:
		el0_sve_acc(regs, esr);
		break;
	case ESR_ELx_EC_SME:
		el0_sme_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC64:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_SYS64:
	case ESR_ELx_EC_WFx:
		el0_sys(regs, esr);
		break;
	case ESR_ELx_EC_SP_ALIGN:
		el0_sp(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_BTI:
		el0_bti(regs);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BRK64:
		el0_dbg(regs, esr);
		break;
	case ESR_ELx_EC_FPAC:
		el0_fpac(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

static void noinstr el0_interrupt(struct pt_regs *regs,
				  void (*handler)(struct pt_regs *))
{
	enter_from_user_mode(regs);

	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

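	/*
	 * A PC with bit 55 set is a TTBR1 (kernel) address, which user code
	 * should never be executing from; apply branch predictor hardening
	 * before handling the interrupt.
	 */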
	if (regs->pc & BIT(55))
		arm64_apply_bp_hardening();

	do_interrupt_handler(regs, handler);

	exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
	el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	enter_from_user_mode(regs);
	local_daif_restore(DAIF_ERRCTX);
	arm64_enter_nmi(regs);
	do_serror(regs, esr);
	arm64_exit_nmi(regs);
	local_daif_restore(DAIF_PROCCTX);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}

#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
	enter_from_user_mode(regs);
	local_daif_restore(DAIF_PROCCTX);
	do_el0_cp15(esr, regs);
	exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	cortex_a76_erratum_1463225_svc_handler();
	do_el0_svc_compat(regs);
	exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_SVC32:
		el0_svc_compat(regs);
		break;
	case ESR_ELx_EC_DABT_LOW:
		el0_da(regs, esr);
		break;
	case ESR_ELx_EC_IABT_LOW:
		el0_ia(regs, esr);
		break;
	case ESR_ELx_EC_FP_ASIMD:
		el0_fpsimd_acc(regs, esr);
		break;
	case ESR_ELx_EC_FP_EXC32:
		el0_fpsimd_exc(regs, esr);
		break;
	case ESR_ELx_EC_PC_ALIGN:
		el0_pc(regs, esr);
		break;
	case ESR_ELx_EC_UNKNOWN:
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_LS:
	case ESR_ELx_EC_CP14_64:
		el0_undef(regs, esr);
		break;
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		el0_cp15(regs, esr);
		break;
	case ESR_ELx_EC_BREAKPT_LOW:
	case ESR_ELx_EC_SOFTSTP_LOW:
	case ESR_ELx_EC_WATCHPT_LOW:
	case ESR_ELx_EC_BKPT32:
		el0_dbg(regs, esr);
		break;
	default:
		el0_inv(regs, esr);
	}
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
	__el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
	__el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
	__el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_VMAP_STACK
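/*
 * Called from the entry code, running on the overflow stack, once a kernel
 * stack overflow has been detected.
 */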
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
	unsigned long esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	arm64_enter_nmi(regs);
	panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	/*
	 * We didn't take an exception to get here, so the HW hasn't
	 * set/cleared bits in PSTATE that we may rely on.
	 *
	 * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
	 * whether PSTATE bits are inherited unchanged or generated from
	 * scratch, and the TF-A implementation always clears PAN and always
	 * clears UAO. There are no other known implementations.
	 *
	 * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
	 * PSTATE is modified upon architectural exceptions, and so PAN is
	 * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
	 * cleared.
	 *
	 * We must explicitly reset PAN to the expected state, including
	 * clearing it when the host isn't using it, in case a VM had it set.
	 */
	if (system_uses_hw_pan())
		set_pstate_pan(1);
	else if (cpu_has_pan())
		set_pstate_pan(0);

	arm64_enter_nmi(regs);
	ret = do_sdei_event(regs, arg);
	arm64_exit_nmi(regs);

	return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */