/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking and irqflag tracing need to instrument transitions between
 * user and kernel mode.
 */
	.macro user_exit_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	enter_from_user_mode
#endif
	.endm

	.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
	bl	exit_to_user_mode
#endif
	.endm

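	/*
	 * Zero all general purpose registers on entry from EL0 so that no
	 * stale, user-controlled values remain live while running in the
	 * kernel.
	 */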
	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

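	/*
	 * Compute the trampoline alias of \sym:
	 * \dst = TRAMP_VALIAS + (\sym - .entry.tramp.text), i.e. the symbol's
	 * offset within .entry.tramp.text applied to the alias mapping at
	 * TRAMP_VALIAS.
	 */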
	.macro tramp_alias, dst, sym, tmp
	mov_q	\dst, TRAMP_VALIAS
	adr_l	\tmp, \sym
	add	\dst, \dst, \tmp
	adr_l	\tmp, .entry.tramp.text
	sub	\dst, \dst, \tmp
	.endm

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
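	/*
	 * apply_ssbd issues the ARM_SMCCC_ARCH_WORKAROUND_2 call with \state
	 * in w1 (1 on kernel entry, 0 on exit to user); the conduit (SMC or
	 * HVC) is patched in by smccc_patch_fw_mitigation_conduit. The call
	 * is skipped when this CPU does not require the callback or when the
	 * task has TIF_SSBD set.
	 */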
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2,	.L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

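	/*
	 * Program GCR_EL1 from \mte_ctrl: the user tag exclusion mask
	 * (MTE_CTRL_GCR_USER_EXCL) plus hardware random tag generation
	 * (RRND).
	 */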
	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load_current
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_current_task tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif

	/* Re-enable tag checking (TCO set on exception entry) */
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	SET_PSTATE_TCO(0)
alternative_else_nop_endif
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	mrs_s	x21, SYS_ICC_CTLR_EL1
	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
	dsb	sy				// Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk, x0

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
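	/* NZCV still holds the PSR_MODE32_BIT test result: NE = compat task */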
	bne	4f
	msr	far_el1, x29
	tramp_alias	x30, tramp_exit_native, x29
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat, x29
	br	x30
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	x24, scs_sp		// preserve the original shadow stack
#endif

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26

#ifdef CONFIG_SHADOW_CALL_STACK
	/* also switch to the irq shadow stack */
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
#endif

9998:
	.endm

	/*
	 * The callee-saved regs (x19-x29) should be preserved between
	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
	 * uses x20-x23 to store data for later use.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
#ifdef CONFIG_SHADOW_CALL_STACK
	mov	scs_sp, x24
#endif
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
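	/*
	 * irq_handler loads the handler pointer before switching stacks (x1
	 * survives the switch), passes the pt_regs pointer in x0, and runs
	 * the handler on this CPU's IRQ stack when entered from the task
	 * stack.
	 */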
	.macro	irq_handler, handler:req
	ldr_l	x1, \handler
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
alternative_else
	mov	\res, xzr
alternative_endif
	.endm
#endif

	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.macro el1_interrupt_handler, handler:req
	enable_da_f

	mov	x0, sp
	bl	enter_el1_irq_or_nmi

	irq_handler	\handler

#ifdef CONFIG_PREEMPTION
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA_F were cleared at start of handling. If anything is set in DAIF,
	 * we are returning from an NMI, so skip preemption.
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
1:
#endif

	mov	x0, sp
	bl	exit_el1_irq_or_nmi
	.endm

	.macro el0_interrupt_handler, handler:req
	user_exit_irqoff
	enable_da_f

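	/*
	 * x22 is the PC the exception was taken from; VA bit 55 selects
	 * TTBR1, so a set bit on an exception from EL0 means the reported PC
	 * is a kernel-half address and the branch predictor is hardened
	 * before the interrupt is handled.
	 */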
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
	irq_handler	\handler
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
SYM_CODE_END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

SYM_CODE_START_LOCAL(el0_sync_invalid)
	inv_entry 0, BAD_SYNC
SYM_CODE_END(el0_sync_invalid)

SYM_CODE_START_LOCAL(el0_irq_invalid)
	inv_entry 0, BAD_IRQ
SYM_CODE_END(el0_irq_invalid)

SYM_CODE_START_LOCAL(el0_fiq_invalid)
	inv_entry 0, BAD_FIQ
SYM_CODE_END(el0_fiq_invalid)

SYM_CODE_START_LOCAL(el0_error_invalid)
	inv_entry 0, BAD_ERROR
SYM_CODE_END(el0_error_invalid)

#ifdef CONFIG_COMPAT
SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
	inv_entry 0, BAD_FIQ, 32
SYM_CODE_END(el0_fiq_invalid_compat)
#endif

SYM_CODE_START_LOCAL(el1_sync_invalid)
	inv_entry 1, BAD_SYNC
SYM_CODE_END(el1_sync_invalid)

SYM_CODE_START_LOCAL(el1_irq_invalid)
	inv_entry 1, BAD_IRQ
SYM_CODE_END(el1_irq_invalid)

SYM_CODE_START_LOCAL(el1_fiq_invalid)
	inv_entry 1, BAD_FIQ
SYM_CODE_END(el1_fiq_invalid)

SYM_CODE_START_LOCAL(el1_error_invalid)
	inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
	kernel_entry 1
	mov	x0, sp
	bl	el1_sync_handler
	kernel_exit 1
SYM_CODE_END(el1_sync)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
	kernel_entry 1
	el1_interrupt_handler handle_arch_irq
	kernel_exit 1
SYM_CODE_END(el1_irq)

/*
 * EL0 mode handlers.
 */
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
	kernel_entry 0
	mov	x0, sp
	bl	el0_sync_handler
	b	ret_to_user
SYM_CODE_END(el0_sync)

#ifdef CONFIG_COMPAT
	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
	kernel_entry 0, 32
	mov	x0, sp
	bl	el0_sync_compat_handler
	b	ret_to_user
SYM_CODE_END(el0_sync_compat)

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
	kernel_entry 0, 32
	b	el0_irq_naked
SYM_CODE_END(el0_irq_compat)

SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
	kernel_entry 0, 32
	b	el0_error_naked
SYM_CODE_END(el0_error_compat)
#endif

	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
	kernel_entry 0
el0_irq_naked:
	el0_interrupt_handler handle_arch_irq
	b	ret_to_user
SYM_CODE_END(el0_irq)

SYM_CODE_START_LOCAL(el1_error)
	kernel_entry 1
	mrs	x1, esr_el1
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
SYM_CODE_END(el1_error)

SYM_CODE_START_LOCAL(el0_error)
	kernel_entry 0
el0_error_naked:
	mrs	x25, esr_el1
	user_exit_irqoff
	enable_dbg
	mov	x0, sp
	mov	x1, x25
	bl	do_serror
	enable_da_f
	b	ret_to_user
SYM_CODE_END(el0_error)

/*
 * "slow" syscall return path.
 */
SYM_CODE_START_LOCAL(ret_to_user)
	disable_daif
	gic_prio_kentry_setup tmp=x3
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	and	x2, x19, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	user_enter_irqoff
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	mov	x1, x19
	bl	do_notify_resume
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(2 * PAGE_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

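	/*
	 * tramp_data_page computes the address of the page just below the
	 * trampoline text which, with CONFIG_RANDOMIZE_BASE, holds the
	 * __entry_tramp_data_* pointers defined later in this file;
	 * tramp_data_read_var loads kernel addresses from there instead of
	 * from an inline literal.
	 */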
	.macro tramp_data_page	dst
	adr_l	\dst, .entry.tramp.text
	sub	\dst, \dst, PAGE_SIZE
	.endm

	.macro tramp_data_read_var	dst, var
#ifdef CONFIG_RANDOMIZE_BASE
	tramp_data_page		\dst
	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
	ldr	\dst, [\dst]
#else
	ldr	\dst, =\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3

	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch, i.e.
	 * the ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	ldr	x30, =vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch,
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

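	/*
	 * Jump to the matching kernel_ventry entry, plus 4 to skip its first
	 * instruction (the branch over the tpidrro_el0 cleanup), so the
	 * cleanup runs when entering via the trampoline.
	 */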
	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm

	.macro tramp_exit, regsize = 64
	tramp_data_read_var	x30, this_cpu_vector
	this_cpu_offset x29
	ldr	x30, [x30, x29]

	msr	vbar_el1, x30
	ldr	lr, [sp, #S_LR]
	tramp_unmap_kernel	x29
	.if	\regsize == 64
	mrs	x29, far_el1
	.endif
	add	sp, sp, #S_FRAME_SIZE		// restore sp
	eret
	sb
	.endm

	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
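	/*
	 * The first eight slots (0x400 bytes) correspond to the current-EL
	 * vectors, which are never expected to be taken via the trampoline;
	 * only the lower-EL 64-bit and 32-bit entries below are populated.
	 */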
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START(tramp_exit_native)
	tramp_exit
SYM_CODE_END(tramp_exit_native)

SYM_CODE_START(tramp_exit_compat)
	tramp_exit	32
SYM_CODE_END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
SYM_DATA_START(__entry_tramp_data_start)
__entry_tramp_data_vectors:
	.quad	vectors
#ifdef CONFIG_ARM_SDE_INTERFACE
__entry_tramp_data___sdei_asm_handler:
	.quad	__sdei_asm_handler
#endif /* CONFIG_ARM_SDE_INTERFACE */
__entry_tramp_data_this_cpu_vector:
	.quad	this_cpu_vector
SYM_DATA_END(__entry_tramp_data_start)
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept 4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection


/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0, x8
	scs_load_current
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
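	/*
	 * x19 and x20 were restored from the new task's cpu_context by
	 * cpu_switch_to(): x19 is non-zero only for kernel threads, where it
	 * holds the thread function and x20 its argument.
	 */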
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4; __sdei_handler() will restore it from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

	tramp_data_read_var     x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
	.ltorg
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

	/* Store the registered-event for crash_smp_send_stop() */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	x19, [x5]

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register; find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, so restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
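	/*
	 * Bits [3:2] of the interrupted PSTATE's mode field encode the EL in
	 * the same position CurrentEL uses, so the compare below tells us
	 * whether the event interrupted EL1; only then do we chain the fake
	 * frame to the interrupted context.
	 */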
	and     x0, x3, #0xc
	mrs     x1, CurrentEL
	cmp     x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

	/* Clear the registered-event seen by crash_smp_send_stop() */
	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
	cbnz	w3, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	xzr, [x5]

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)

SYM_CODE_START(__sdei_handler_abort)
	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	adr	x1, 1f
	ldr_l	x2, sdei_exit_mode
	sdei_handler_exit exit_mode=x2
	// exit the handler and jump to the next instruction.
	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1:	ret
SYM_CODE_END(__sdei_handler_abort)
NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */
