• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Low-level exception handling code
4 *
5 * Copyright (C) 2012 ARM Ltd.
6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
7 *		Will Deacon <will.deacon@arm.com>
8 */
9
10#include <linux/arm-smccc.h>
11#include <linux/init.h>
12#include <linux/linkage.h>
13
14#include <asm/alternative.h>
15#include <asm/assembler.h>
16#include <asm/asm-offsets.h>
17#include <asm/asm_pointer_auth.h>
18#include <asm/bug.h>
19#include <asm/cpufeature.h>
20#include <asm/errno.h>
21#include <asm/esr.h>
22#include <asm/irq.h>
23#include <asm/memory.h>
24#include <asm/mmu.h>
25#include <asm/processor.h>
26#include <asm/ptrace.h>
27#include <asm/scs.h>
28#include <asm/thread_info.h>
29#include <asm/asm-uaccess.h>
30#include <asm/unistd.h>
31
32/*
33 * Context tracking and irqflag tracing need to instrument transitions between
34 * user and kernel mode.
35 */
36	.macro user_exit_irqoff
37#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
38	bl	enter_from_user_mode
39#endif
40	.endm
41
42	.macro user_enter_irqoff
43#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
44	bl	exit_to_user_mode
45#endif
46	.endm
47
48	.macro	clear_gp_regs
49	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
50	mov	x\n, xzr
51	.endr
52	.endm
53
54/*
55 * Bad Abort numbers
56 *-----------------
57 */
58#define BAD_SYNC	0
59#define BAD_IRQ		1
60#define BAD_FIQ		2
61#define BAD_ERROR	3
62
63	.macro kernel_ventry, el, label, regsize = 64
64	.align 7
65.Lventry_start\@:
66	.if	\el == 0
67	/*
68	 * This must be the first instruction of the EL0 vector entries. It is
69	 * skipped by the trampoline vectors, to trigger the cleanup.
70	 */
71	b	.Lskip_tramp_vectors_cleanup\@
72	.if	\regsize == 64
73	mrs	x30, tpidrro_el0
74	msr	tpidrro_el0, xzr
75	.else
76	mov	x30, xzr
77	.endif
78.Lskip_tramp_vectors_cleanup\@:
79	.endif
80
81	sub	sp, sp, #S_FRAME_SIZE
82#ifdef CONFIG_VMAP_STACK
83	/*
84	 * Test whether the SP has overflowed, without corrupting a GPR.
85	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
86	 * should always be zero.
87	 */
88	add	sp, sp, x0			// sp' = sp + x0
89	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
90	tbnz	x0, #THREAD_SHIFT, 0f
91	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
92	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
93	b	el\()\el\()_\label
94
950:
96	/*
97	 * Either we've just detected an overflow, or we've taken an exception
98	 * while on the overflow stack. Either way, we won't return to
99	 * userspace, and can clobber EL0 registers to free up GPRs.
100	 */
101
102	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
103	msr	tpidr_el0, x0
104
105	/* Recover the original x0 value and stash it in tpidrro_el0 */
106	sub	x0, sp, x0
107	msr	tpidrro_el0, x0
108
109	/* Switch to the overflow stack */
110	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
111
112	/*
113	 * Check whether we were already on the overflow stack. This may happen
114	 * after panic() re-enables interrupts.
115	 */
116	mrs	x0, tpidr_el0			// sp of interrupted context
117	sub	x0, sp, x0			// delta with top of overflow stack
118	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
119	b.ne	__bad_stack			// no? -> bad stack pointer
120
121	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
122	sub	sp, sp, x0
123	mrs	x0, tpidrro_el0
124#endif
125	b	el\()\el\()_\label
126.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
127	.endm
128
129	.macro tramp_alias, dst, sym, tmp
130	mov_q	\dst, TRAMP_VALIAS
131	adr_l	\tmp, \sym
132	add	\dst, \dst, \tmp
133	adr_l	\tmp, .entry.tramp.text
134	sub	\dst, \dst, \tmp
135	.endm
136
137	/*
138	 * This macro corrupts x0-x3. It is the caller's duty  to save/restore
139	 * them if required.
140	 */
141	.macro	apply_ssbd, state, tmp1, tmp2
142alternative_cb	spectre_v4_patch_fw_mitigation_enable
143	b	.L__asm_ssbd_skip\@		// Patched to NOP
144alternative_cb_end
145	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
146	cbz	\tmp2,	.L__asm_ssbd_skip\@
147	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
148	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
149	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
150	mov	w1, #\state
151alternative_cb	smccc_patch_fw_mitigation_conduit
152	nop					// Patched to SMC/HVC #0
153alternative_cb_end
154.L__asm_ssbd_skip\@:
155	.endm
156
157	/* Check for MTE asynchronous tag check faults */
158	.macro check_mte_async_tcf, tmp, ti_flags
159#ifdef CONFIG_ARM64_MTE
160	.arch_extension lse
161alternative_if_not ARM64_MTE
162	b	1f
163alternative_else_nop_endif
164	mrs_s	\tmp, SYS_TFSRE0_EL1
165	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
166	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
167	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
168	add	\ti_flags, tsk, #TSK_TI_FLAGS
169	stset	\tmp, [\ti_flags]
170	msr_s	SYS_TFSRE0_EL1, xzr
1711:
172#endif
173	.endm
174
175	/* Clear the MTE asynchronous tag check faults */
176	.macro clear_mte_async_tcf
177#ifdef CONFIG_ARM64_MTE
178alternative_if ARM64_MTE
179	dsb	ish
180	msr_s	SYS_TFSRE0_EL1, xzr
181alternative_else_nop_endif
182#endif
183	.endm
184
185	.macro	kernel_entry, el, regsize = 64
186	.if	\regsize == 32
187	mov	w0, w0				// zero upper 32 bits of x0
188	.endif
189	stp	x0, x1, [sp, #16 * 0]
190	stp	x2, x3, [sp, #16 * 1]
191	stp	x4, x5, [sp, #16 * 2]
192	stp	x6, x7, [sp, #16 * 3]
193	stp	x8, x9, [sp, #16 * 4]
194	stp	x10, x11, [sp, #16 * 5]
195	stp	x12, x13, [sp, #16 * 6]
196	stp	x14, x15, [sp, #16 * 7]
197	stp	x16, x17, [sp, #16 * 8]
198	stp	x18, x19, [sp, #16 * 9]
199	stp	x20, x21, [sp, #16 * 10]
200	stp	x22, x23, [sp, #16 * 11]
201	stp	x24, x25, [sp, #16 * 12]
202	stp	x26, x27, [sp, #16 * 13]
203	stp	x28, x29, [sp, #16 * 14]
204
205	.if	\el == 0
206	clear_gp_regs
207	mrs	x21, sp_el0
208	ldr_this_cpu	tsk, __entry_task, x20
209	msr	sp_el0, tsk
210
211	/*
212	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
213	 * when scheduling.
214	 */
215	ldr	x19, [tsk, #TSK_TI_FLAGS]
216	disable_step_tsk x19, x20
217
218	/* Check for asynchronous tag check faults in user space */
219	check_mte_async_tcf x22, x23
220	apply_ssbd 1, x22, x23
221
222	ptrauth_keys_install_kernel tsk, x20, x22, x23
223
224	scs_load tsk, x20
225	.else
226	add	x21, sp, #S_FRAME_SIZE
227	get_current_task tsk
228	/* Save the task's original addr_limit and set USER_DS */
229	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
230	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
231	mov	x20, #USER_DS
232	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
233	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
234	.endif /* \el == 0 */
235	mrs	x22, elr_el1
236	mrs	x23, spsr_el1
237	stp	lr, x21, [sp, #S_LR]
238
239	/*
240	 * In order to be able to dump the contents of struct pt_regs at the
241	 * time the exception was taken (in case we attempt to walk the call
242	 * stack later), chain it together with the stack frames.
243	 */
244	.if \el == 0
245	stp	xzr, xzr, [sp, #S_STACKFRAME]
246	.else
247	stp	x29, x22, [sp, #S_STACKFRAME]
248	.endif
249	add	x29, sp, #S_STACKFRAME
250
251#ifdef CONFIG_ARM64_SW_TTBR0_PAN
252alternative_if_not ARM64_HAS_PAN
253	bl	__swpan_entry_el\el
254alternative_else_nop_endif
255#endif
256
257	stp	x22, x23, [sp, #S_PC]
258
259	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
260	.if	\el == 0
261	mov	w21, #NO_SYSCALL
262	str	w21, [sp, #S_SYSCALLNO]
263	.endif
264
265	/* Save pmr */
266alternative_if ARM64_HAS_IRQ_PRIO_MASKING
267	mrs_s	x20, SYS_ICC_PMR_EL1
268	str	x20, [sp, #S_PMR_SAVE]
269	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
270	msr_s	SYS_ICC_PMR_EL1, x20
271alternative_else_nop_endif
272
273	/* Re-enable tag checking (TCO set on exception entry) */
274#ifdef CONFIG_ARM64_MTE
275alternative_if ARM64_MTE
276	SET_PSTATE_TCO(0)
277alternative_else_nop_endif
278#endif
279
280	/*
281	 * Registers that may be useful after this macro is invoked:
282	 *
283	 * x20 - ICC_PMR_EL1
284	 * x21 - aborted SP
285	 * x22 - aborted PC
286	 * x23 - aborted PSTATE
287	*/
288	.endm
289
290	.macro	kernel_exit, el
291	.if	\el != 0
292	disable_daif
293
294	/* Restore the task's original addr_limit. */
295	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
296	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
297
298	/* No need to restore UAO, it will be restored from SPSR_EL1 */
299	.endif
300
301	/* Restore pmr */
302alternative_if ARM64_HAS_IRQ_PRIO_MASKING
303	ldr	x20, [sp, #S_PMR_SAVE]
304	msr_s	SYS_ICC_PMR_EL1, x20
305	mrs_s	x21, SYS_ICC_CTLR_EL1
306	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
307	dsb	sy				// Ensure priority change is seen by redistributor
308.L__skip_pmr_sync\@:
309alternative_else_nop_endif
310
311	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
312
313#ifdef CONFIG_ARM64_SW_TTBR0_PAN
314alternative_if_not ARM64_HAS_PAN
315	bl	__swpan_exit_el\el
316alternative_else_nop_endif
317#endif
318
319	.if	\el == 0
320	ldr	x23, [sp, #S_SP]		// load return stack pointer
321	msr	sp_el0, x23
322	tst	x22, #PSR_MODE32_BIT		// native task?
323	b.eq	3f
324
325#ifdef CONFIG_ARM64_ERRATUM_845719
326alternative_if ARM64_WORKAROUND_845719
327#ifdef CONFIG_PID_IN_CONTEXTIDR
328	mrs	x29, contextidr_el1
329	msr	contextidr_el1, x29
330#else
331	msr contextidr_el1, xzr
332#endif
333alternative_else_nop_endif
334#endif
3353:
336	scs_save tsk, x0
337
338	/* No kernel C function calls after this as user keys are set. */
339	ptrauth_keys_install_user tsk, x0, x1, x2
340
341	apply_ssbd 0, x0, x1
342	.endif
343
344	msr	elr_el1, x21			// set up the return data
345	msr	spsr_el1, x22
346	ldp	x0, x1, [sp, #16 * 0]
347	ldp	x2, x3, [sp, #16 * 1]
348	ldp	x4, x5, [sp, #16 * 2]
349	ldp	x6, x7, [sp, #16 * 3]
350	ldp	x8, x9, [sp, #16 * 4]
351	ldp	x10, x11, [sp, #16 * 5]
352	ldp	x12, x13, [sp, #16 * 6]
353	ldp	x14, x15, [sp, #16 * 7]
354	ldp	x16, x17, [sp, #16 * 8]
355	ldp	x18, x19, [sp, #16 * 9]
356	ldp	x20, x21, [sp, #16 * 10]
357	ldp	x22, x23, [sp, #16 * 11]
358	ldp	x24, x25, [sp, #16 * 12]
359	ldp	x26, x27, [sp, #16 * 13]
360	ldp	x28, x29, [sp, #16 * 14]
361
362	.if	\el == 0
363alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
364	ldr	lr, [sp, #S_LR]
365	add	sp, sp, #S_FRAME_SIZE		// restore sp
366	eret
367alternative_else_nop_endif
368#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
369	bne	4f
370	msr	far_el1, x29
371	tramp_alias	x30, tramp_exit_native, x29
372	br	x30
3734:
374	tramp_alias	x30, tramp_exit_compat, x29
375	br	x30
376#endif
377	.else
378	ldr	lr, [sp, #S_LR]
379	add	sp, sp, #S_FRAME_SIZE		// restore sp
380
381	/* Ensure any device/NC reads complete */
382	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
383
384	eret
385	.endif
386	sb
387	.endm
388
389#ifdef CONFIG_ARM64_SW_TTBR0_PAN
390	/*
391	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
392	 * EL0, there is no need to check the state of TTBR0_EL1 since
393	 * accesses are always enabled.
394	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
395	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
396	 * user mappings.
397	 */
398SYM_CODE_START_LOCAL(__swpan_entry_el1)
399	mrs	x21, ttbr0_el1
400	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
401	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
402	b.eq	1f				// TTBR0 access already disabled
403	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
404SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
405	__uaccess_ttbr0_disable x21
4061:	ret
407SYM_CODE_END(__swpan_entry_el1)
408
409	/*
410	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
411	 * PAN bit checking.
412	 */
413SYM_CODE_START_LOCAL(__swpan_exit_el1)
414	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
415	__uaccess_ttbr0_enable x0, x1
4161:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
417	ret
418SYM_CODE_END(__swpan_exit_el1)
419
420SYM_CODE_START_LOCAL(__swpan_exit_el0)
421	__uaccess_ttbr0_enable x0, x1
422	/*
423	 * Enable errata workarounds only if returning to user. The only
424	 * workaround currently required for TTBR0_EL1 changes are for the
425	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
426	 * corruption).
427	 */
428	b	post_ttbr_update_workaround
429SYM_CODE_END(__swpan_exit_el0)
430#endif
431
432	.macro	irq_stack_entry
433	mov	x19, sp			// preserve the original sp
434#ifdef CONFIG_SHADOW_CALL_STACK
435	mov	x24, scs_sp		// preserve the original shadow stack
436#endif
437
438	/*
439	 * Compare sp with the base of the task stack.
440	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
441	 * and should switch to the irq stack.
442	 */
443	ldr	x25, [tsk, TSK_STACK]
444	eor	x25, x25, x19
445	and	x25, x25, #~(THREAD_SIZE - 1)
446	cbnz	x25, 9998f
447
448	ldr_this_cpu x25, irq_stack_ptr, x26
449	mov	x26, #IRQ_STACK_SIZE
450	add	x26, x25, x26
451
452	/* switch to the irq stack */
453	mov	sp, x26
454
455#ifdef CONFIG_SHADOW_CALL_STACK
456	/* also switch to the irq shadow stack */
457	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
458#endif
459
4609998:
461	.endm
462
463	/*
464	 * The callee-saved regs (x19-x29) should be preserved between
465	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
466	 * uses x20-x23 to store data for later use.
467	 */
468	.macro	irq_stack_exit
469	mov	sp, x19
470#ifdef CONFIG_SHADOW_CALL_STACK
471	mov	scs_sp, x24
472#endif
473	.endm
474
475/* GPRs used by entry code */
476tsk	.req	x28		// current thread_info
477
478/*
479 * Interrupt handling.
480 */
481	.macro	irq_handler, handler:req
482	ldr_l	x1, \handler
483	mov	x0, sp
484	irq_stack_entry
485	blr	x1
486	irq_stack_exit
487	.endm
488
489#ifdef CONFIG_ARM64_PSEUDO_NMI
490	/*
491	 * Set res to 0 if irqs were unmasked in interrupted context.
492	 * Otherwise set res to non-0 value.
493	 */
494	.macro	test_irqs_unmasked res:req, pmr:req
495alternative_if ARM64_HAS_IRQ_PRIO_MASKING
496	sub	\res, \pmr, #GIC_PRIO_IRQON
497alternative_else
498	mov	\res, xzr
499alternative_endif
500	.endm
501#endif
502
503	.macro	gic_prio_kentry_setup, tmp:req
504#ifdef CONFIG_ARM64_PSEUDO_NMI
505	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
506	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
507	msr_s	SYS_ICC_PMR_EL1, \tmp
508	alternative_else_nop_endif
509#endif
510	.endm
511
512	.macro el1_interrupt_handler, handler:req
513	enable_da_f
514
515	mov	x0, sp
516	bl	enter_el1_irq_or_nmi
517
518	irq_handler	\handler
519
520#ifdef CONFIG_PREEMPTION
521	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
522alternative_if ARM64_HAS_IRQ_PRIO_MASKING
523	/*
524	 * DA_F were cleared at start of handling. If anything is set in DAIF,
525	 * we come back from an NMI, so skip preemption
526	 */
527	mrs	x0, daif
528	orr	x24, x24, x0
529alternative_else_nop_endif
530	cbnz	x24, 1f				// preempt count != 0 || NMI return path
531	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
5321:
533#endif
534
535	mov	x0, sp
536	bl	exit_el1_irq_or_nmi
537	.endm
538
539	.macro el0_interrupt_handler, handler:req
540	user_exit_irqoff
541	enable_da_f
542
543	tbz	x22, #55, 1f
544	bl	do_el0_irq_bp_hardening
5451:
546	irq_handler	\handler
547	.endm
548
549	.text
550
551/*
552 * Exception vectors.
553 */
554	.pushsection ".entry.text", "ax"
555
556	.align	11
557SYM_CODE_START(vectors)
558	kernel_ventry	1, sync_invalid			// Synchronous EL1t
559	kernel_ventry	1, irq_invalid			// IRQ EL1t
560	kernel_ventry	1, fiq_invalid			// FIQ EL1t
561	kernel_ventry	1, error_invalid		// Error EL1t
562
563	kernel_ventry	1, sync				// Synchronous EL1h
564	kernel_ventry	1, irq				// IRQ EL1h
565	kernel_ventry	1, fiq_invalid			// FIQ EL1h
566	kernel_ventry	1, error			// Error EL1h
567
568	kernel_ventry	0, sync				// Synchronous 64-bit EL0
569	kernel_ventry	0, irq				// IRQ 64-bit EL0
570	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
571	kernel_ventry	0, error			// Error 64-bit EL0
572
573#ifdef CONFIG_COMPAT
574	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
575	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
576	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
577	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
578#else
579	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
580	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
581	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
582	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
583#endif
584SYM_CODE_END(vectors)
585
586#ifdef CONFIG_VMAP_STACK
587	/*
588	 * We detected an overflow in kernel_ventry, which switched to the
589	 * overflow stack. Stash the exception regs, and head to our overflow
590	 * handler.
591	 */
592__bad_stack:
593	/* Restore the original x0 value */
594	mrs	x0, tpidrro_el0
595
596	/*
597	 * Store the original GPRs to the new stack. The orginal SP (minus
598	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
599	 */
600	sub	sp, sp, #S_FRAME_SIZE
601	kernel_entry 1
602	mrs	x0, tpidr_el0
603	add	x0, x0, #S_FRAME_SIZE
604	str	x0, [sp, #S_SP]
605
606	/* Stash the regs for handle_bad_stack */
607	mov	x0, sp
608
609	/* Time to die */
610	bl	handle_bad_stack
611	ASM_BUG()
612#endif /* CONFIG_VMAP_STACK */
613
614/*
615 * Invalid mode handlers
616 */
617	.macro	inv_entry, el, reason, regsize = 64
618	kernel_entry \el, \regsize
619	mov	x0, sp
620	mov	x1, #\reason
621	mrs	x2, esr_el1
622	bl	bad_mode
623	ASM_BUG()
624	.endm
625
626SYM_CODE_START_LOCAL(el0_sync_invalid)
627	inv_entry 0, BAD_SYNC
628SYM_CODE_END(el0_sync_invalid)
629
630SYM_CODE_START_LOCAL(el0_irq_invalid)
631	inv_entry 0, BAD_IRQ
632SYM_CODE_END(el0_irq_invalid)
633
634SYM_CODE_START_LOCAL(el0_fiq_invalid)
635	inv_entry 0, BAD_FIQ
636SYM_CODE_END(el0_fiq_invalid)
637
638SYM_CODE_START_LOCAL(el0_error_invalid)
639	inv_entry 0, BAD_ERROR
640SYM_CODE_END(el0_error_invalid)
641
642#ifdef CONFIG_COMPAT
643SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
644	inv_entry 0, BAD_FIQ, 32
645SYM_CODE_END(el0_fiq_invalid_compat)
646#endif
647
648SYM_CODE_START_LOCAL(el1_sync_invalid)
649	inv_entry 1, BAD_SYNC
650SYM_CODE_END(el1_sync_invalid)
651
652SYM_CODE_START_LOCAL(el1_irq_invalid)
653	inv_entry 1, BAD_IRQ
654SYM_CODE_END(el1_irq_invalid)
655
656SYM_CODE_START_LOCAL(el1_fiq_invalid)
657	inv_entry 1, BAD_FIQ
658SYM_CODE_END(el1_fiq_invalid)
659
660SYM_CODE_START_LOCAL(el1_error_invalid)
661	inv_entry 1, BAD_ERROR
662SYM_CODE_END(el1_error_invalid)
663
664/*
665 * EL1 mode handlers.
666 */
667	.align	6
668SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
669	kernel_entry 1
670	mov	x0, sp
671	bl	el1_sync_handler
672	kernel_exit 1
673SYM_CODE_END(el1_sync)
674
675	.align	6
676SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
677	kernel_entry 1
678	el1_interrupt_handler handle_arch_irq
679	kernel_exit 1
680SYM_CODE_END(el1_irq)
681
682/*
683 * EL0 mode handlers.
684 */
685	.align	6
686SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
687	kernel_entry 0
688	mov	x0, sp
689	bl	el0_sync_handler
690	b	ret_to_user
691SYM_CODE_END(el0_sync)
692
693#ifdef CONFIG_COMPAT
694	.align	6
695SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
696	kernel_entry 0, 32
697	mov	x0, sp
698	bl	el0_sync_compat_handler
699	b	ret_to_user
700SYM_CODE_END(el0_sync_compat)
701
702	.align	6
703SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
704	kernel_entry 0, 32
705	b	el0_irq_naked
706SYM_CODE_END(el0_irq_compat)
707
708SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
709	kernel_entry 0, 32
710	b	el0_error_naked
711SYM_CODE_END(el0_error_compat)
712#endif
713
714	.align	6
715SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
716	kernel_entry 0
717el0_irq_naked:
718	el0_interrupt_handler handle_arch_irq
719	b	ret_to_user
720SYM_CODE_END(el0_irq)
721
722SYM_CODE_START_LOCAL(el1_error)
723	kernel_entry 1
724	mrs	x1, esr_el1
725	enable_dbg
726	mov	x0, sp
727	bl	do_serror
728	kernel_exit 1
729SYM_CODE_END(el1_error)
730
731SYM_CODE_START_LOCAL(el0_error)
732	kernel_entry 0
733el0_error_naked:
734	mrs	x25, esr_el1
735	user_exit_irqoff
736	enable_dbg
737	mov	x0, sp
738	mov	x1, x25
739	bl	do_serror
740	enable_da_f
741	b	ret_to_user
742SYM_CODE_END(el0_error)
743
744/*
745 * "slow" syscall return path.
746 */
747SYM_CODE_START_LOCAL(ret_to_user)
748	disable_daif
749	gic_prio_kentry_setup tmp=x3
750#ifdef CONFIG_TRACE_IRQFLAGS
751	bl	trace_hardirqs_off
752#endif
753	ldr	x19, [tsk, #TSK_TI_FLAGS]
754	and	x2, x19, #_TIF_WORK_MASK
755	cbnz	x2, work_pending
756finish_ret_to_user:
757	user_enter_irqoff
758	/* Ignore asynchronous tag check faults in the uaccess routines */
759	clear_mte_async_tcf
760	enable_step_tsk x19, x2
761#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
762	bl	stackleak_erase
763#endif
764	kernel_exit 0
765
766/*
767 * Ok, we need to do extra processing, enter the slow path.
768 */
769work_pending:
770	mov	x0, sp				// 'regs'
771	mov	x1, x19
772	bl	do_notify_resume
773	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
774	b	finish_ret_to_user
775SYM_CODE_END(ret_to_user)
776
777	.popsection				// .entry.text
778
779	// Move from tramp_pg_dir to swapper_pg_dir
780	.macro tramp_map_kernel, tmp
781	mrs	\tmp, ttbr1_el1
782	add	\tmp, \tmp, #(2 * PAGE_SIZE)
783	bic	\tmp, \tmp, #USER_ASID_FLAG
784	msr	ttbr1_el1, \tmp
785#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
786alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
787	/* ASID already in \tmp[63:48] */
788	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
789	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
790	/* 2MB boundary containing the vectors, so we nobble the walk cache */
791	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
792	isb
793	tlbi	vae1, \tmp
794	dsb	nsh
795alternative_else_nop_endif
796#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
797	.endm
798
799	// Move from swapper_pg_dir to tramp_pg_dir
800	.macro tramp_unmap_kernel, tmp
801	mrs	\tmp, ttbr1_el1
802	sub	\tmp, \tmp, #(2 * PAGE_SIZE)
803	orr	\tmp, \tmp, #USER_ASID_FLAG
804	msr	ttbr1_el1, \tmp
805	/*
806	 * We avoid running the post_ttbr_update_workaround here because
807	 * it's only needed by Cavium ThunderX, which requires KPTI to be
808	 * disabled.
809	 */
810	.endm
811
812	.macro tramp_data_page	dst
813	adr_l	\dst, .entry.tramp.text
814	sub	\dst, \dst, PAGE_SIZE
815	.endm
816
817	.macro tramp_data_read_var	dst, var
818#ifdef CONFIG_RANDOMIZE_BASE
819	tramp_data_page		\dst
820	add	\dst, \dst, #:lo12:__entry_tramp_data_\var
821	ldr	\dst, [\dst]
822#else
823	ldr	\dst, =\var
824#endif
825	.endm
826
827#define BHB_MITIGATION_NONE	0
828#define BHB_MITIGATION_LOOP	1
829#define BHB_MITIGATION_FW	2
830#define BHB_MITIGATION_INSN	3
831
832	.macro tramp_ventry, vector_start, regsize, kpti, bhb
833	.align	7
8341:
835	.if	\regsize == 64
836	msr	tpidrro_el0, x30	// Restored in kernel_ventry
837	.endif
838
839	.if	\bhb == BHB_MITIGATION_LOOP
840	/*
841	 * This sequence must appear before the first indirect branch. i.e. the
842	 * ret out of tramp_ventry. It appears here because x30 is free.
843	 */
844	__mitigate_spectre_bhb_loop	x30
845	.endif // \bhb == BHB_MITIGATION_LOOP
846
847	.if	\bhb == BHB_MITIGATION_INSN
848	clearbhb
849	isb
850	.endif // \bhb == BHB_MITIGATION_INSN
851
852	.if	\kpti == 1
853	/*
854	 * Defend against branch aliasing attacks by pushing a dummy
855	 * entry onto the return stack and using a RET instruction to
856	 * enter the full-fat kernel vectors.
857	 */
858	bl	2f
859	b	.
8602:
861	tramp_map_kernel	x30
862alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
863	tramp_data_read_var	x30, vectors
864alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
865	prfm	plil1strm, [x30, #(1b - \vector_start)]
866alternative_else_nop_endif
867
868	msr	vbar_el1, x30
869	isb
870	.else
871	ldr	x30, =vectors
872	.endif // \kpti == 1
873
874	.if	\bhb == BHB_MITIGATION_FW
875	/*
876	 * The firmware sequence must appear before the first indirect branch.
877	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
878	 * mapped to save/restore the registers the SMC clobbers.
879	 */
880	__mitigate_spectre_bhb_fw
881	.endif // \bhb == BHB_MITIGATION_FW
882
883	add	x30, x30, #(1b - \vector_start + 4)
884	ret
885.org 1b + 128	// Did we overflow the ventry slot?
886	.endm
887
888	.macro tramp_exit, regsize = 64
889	tramp_data_read_var	x30, this_cpu_vector
890	this_cpu_offset x29
891	ldr	x30, [x30, x29]
892
893	msr	vbar_el1, x30
894	ldr	lr, [sp, #S_LR]
895	tramp_unmap_kernel	x29
896	.if	\regsize == 64
897	mrs	x29, far_el1
898	.endif
899	add	sp, sp, #S_FRAME_SIZE		// restore sp
900	eret
901	sb
902	.endm
903
904	.macro	generate_tramp_vector,	kpti, bhb
905.Lvector_start\@:
906	.space	0x400
907
908	.rept	4
909	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
910	.endr
911	.rept	4
912	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
913	.endr
914	.endm
915
916#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
917/*
918 * Exception vectors trampoline.
919 * The order must match __bp_harden_el1_vectors and the
920 * arm64_bp_harden_el1_vectors enum.
921 */
922	.pushsection ".entry.tramp.text", "ax"
923	.align	11
924SYM_CODE_START_NOALIGN(tramp_vectors)
925#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
926	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
927	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
928	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
929#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
930	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
931SYM_CODE_END(tramp_vectors)
932
933SYM_CODE_START(tramp_exit_native)
934	tramp_exit
935SYM_CODE_END(tramp_exit_native)
936
937SYM_CODE_START(tramp_exit_compat)
938	tramp_exit	32
939SYM_CODE_END(tramp_exit_compat)
940
941	.ltorg
942	.popsection				// .entry.tramp.text
943#ifdef CONFIG_RANDOMIZE_BASE
944	.pushsection ".rodata", "a"
945	.align PAGE_SHIFT
946SYM_DATA_START(__entry_tramp_data_start)
947__entry_tramp_data_vectors:
948	.quad	vectors
949#ifdef CONFIG_ARM_SDE_INTERFACE
950__entry_tramp_data___sdei_asm_handler:
951	.quad	__sdei_asm_handler
952#endif /* CONFIG_ARM_SDE_INTERFACE */
953__entry_tramp_data_this_cpu_vector:
954	.quad	this_cpu_vector
955SYM_DATA_END(__entry_tramp_data_start)
956	.popsection				// .rodata
957#endif /* CONFIG_RANDOMIZE_BASE */
958#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
959
960/*
961 * Exception vectors for spectre mitigations on entry from EL1 when
962 * kpti is not in use.
963 */
964	.macro generate_el1_vector, bhb
965.Lvector_start\@:
966	kernel_ventry	1, sync_invalid			// Synchronous EL1t
967	kernel_ventry	1, irq_invalid			// IRQ EL1t
968	kernel_ventry	1, fiq_invalid			// FIQ EL1t
969	kernel_ventry	1, error_invalid		// Error EL1t
970
971	kernel_ventry	1, sync				// Synchronous EL1h
972	kernel_ventry	1, irq				// IRQ EL1h
973	kernel_ventry	1, fiq_invalid			// FIQ EL1h
974	kernel_ventry	1, error			// Error EL1h
975
976	.rept	4
977	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
978	.endr
979	.rept 4
980	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
981	.endr
982	.endm
983
984/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
985	.pushsection ".entry.text", "ax"
986	.align	11
987SYM_CODE_START(__bp_harden_el1_vectors)
988#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
989	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
990	generate_el1_vector	bhb=BHB_MITIGATION_FW
991	generate_el1_vector	bhb=BHB_MITIGATION_INSN
992#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
993SYM_CODE_END(__bp_harden_el1_vectors)
994	.popsection
995
996
997/*
998 * Register switch for AArch64. The callee-saved registers need to be saved
999 * and restored. On entry:
1000 *   x0 = previous task_struct (must be preserved across the switch)
1001 *   x1 = next task_struct
1002 * Previous and next are guaranteed not to be the same.
1003 *
1004 */
1005SYM_FUNC_START(cpu_switch_to)
1006	mov	x10, #THREAD_CPU_CONTEXT
1007	add	x8, x0, x10
1008	mov	x9, sp
1009	stp	x19, x20, [x8], #16		// store callee-saved registers
1010	stp	x21, x22, [x8], #16
1011	stp	x23, x24, [x8], #16
1012	stp	x25, x26, [x8], #16
1013	stp	x27, x28, [x8], #16
1014	stp	x29, x9, [x8], #16
1015	str	lr, [x8]
1016	add	x8, x1, x10
1017	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1018	ldp	x21, x22, [x8], #16
1019	ldp	x23, x24, [x8], #16
1020	ldp	x25, x26, [x8], #16
1021	ldp	x27, x28, [x8], #16
1022	ldp	x29, x9, [x8], #16
1023	ldr	lr, [x8]
1024	mov	sp, x9
1025	msr	sp_el0, x1
1026	ptrauth_keys_install_kernel x1, x8, x9, x10
1027	scs_save x0, x8
1028	scs_load x1, x8
1029	ret
1030SYM_FUNC_END(cpu_switch_to)
1031NOKPROBE(cpu_switch_to)
1032
1033/*
1034 * This is how we return from a fork.
1035 */
1036SYM_CODE_START(ret_from_fork)
1037	bl	schedule_tail
1038	cbz	x19, 1f				// not a kernel thread
1039	mov	x0, x20
1040	blr	x19
10411:	get_current_task tsk
1042	b	ret_to_user
1043SYM_CODE_END(ret_from_fork)
1044NOKPROBE(ret_from_fork)
1045
1046#ifdef CONFIG_ARM_SDE_INTERFACE
1047
1048#include <asm/sdei.h>
1049#include <uapi/linux/arm_sdei.h>
1050
1051.macro sdei_handler_exit exit_mode
1052	/* On success, this call never returns... */
1053	cmp	\exit_mode, #SDEI_EXIT_SMC
1054	b.ne	99f
1055	smc	#0
1056	b	.
105799:	hvc	#0
1058	b	.
1059.endm
1060
1061#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1062/*
1063 * The regular SDEI entry point may have been unmapped along with the rest of
1064 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1065 * argument accessible.
1066 *
1067 * This clobbers x4, __sdei_handler() will restore this from firmware's
1068 * copy.
1069 */
1070.ltorg
1071.pushsection ".entry.tramp.text", "ax"
1072SYM_CODE_START(__sdei_asm_entry_trampoline)
1073	mrs	x4, ttbr1_el1
1074	tbz	x4, #USER_ASID_BIT, 1f
1075
1076	tramp_map_kernel tmp=x4
1077	isb
1078	mov	x4, xzr
1079
1080	/*
1081	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1082	 * the kernel on exit.
1083	 */
10841:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1085
1086	tramp_data_read_var     x4, __sdei_asm_handler
1087	br	x4
1088SYM_CODE_END(__sdei_asm_entry_trampoline)
1089NOKPROBE(__sdei_asm_entry_trampoline)
1090
1091/*
1092 * Make the exit call and restore the original ttbr1_el1
1093 *
1094 * x0 & x1: setup for the exit API call
1095 * x2: exit_mode
1096 * x4: struct sdei_registered_event argument from registration time.
1097 */
1098SYM_CODE_START(__sdei_asm_exit_trampoline)
1099	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1100	cbnz	x4, 1f
1101
1102	tramp_unmap_kernel	tmp=x4
1103
11041:	sdei_handler_exit exit_mode=x2
1105SYM_CODE_END(__sdei_asm_exit_trampoline)
1106NOKPROBE(__sdei_asm_exit_trampoline)
1107	.ltorg
1108.popsection		// .entry.tramp.text
1109#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1110
1111/*
1112 * Software Delegated Exception entry point.
1113 *
1114 * x0: Event number
1115 * x1: struct sdei_registered_event argument from registration time.
1116 * x2: interrupted PC
1117 * x3: interrupted PSTATE
1118 * x4: maybe clobbered by the trampoline
1119 *
1120 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
1121 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1122 * want them.
1123 */
1124SYM_CODE_START(__sdei_asm_handler)
1125	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1126	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1127	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1128	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1129	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1130	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1131	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1132	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1133	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1134	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1135	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1136	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1137	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1138	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1139	mov	x4, sp
1140	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1141
1142	mov	x19, x1
1143
1144#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
1145	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1146#endif
1147
1148#ifdef CONFIG_VMAP_STACK
1149	/*
1150	 * entry.S may have been using sp as a scratch register, find whether
1151	 * this is a normal or critical event and switch to the appropriate
1152	 * stack for this CPU.
1153	 */
1154	cbnz	w4, 1f
1155	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1156	b	2f
11571:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
11582:	mov	x6, #SDEI_STACK_SIZE
1159	add	x5, x5, x6
1160	mov	sp, x5
1161#endif
1162
1163#ifdef CONFIG_SHADOW_CALL_STACK
1164	/* Use a separate shadow call stack for normal and critical events */
1165	cbnz	w4, 3f
1166	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal, tmp=x6
1167	b	4f
11683:	adr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical, tmp=x6
11694:
1170#endif
1171
1172	/*
1173	 * We may have interrupted userspace, or a guest, or exit-from or
1174	 * return-to either of these. We can't trust sp_el0, restore it.
1175	 */
1176	mrs	x28, sp_el0
1177	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1178	msr	sp_el0, x0
1179
1180	/* If we interrupted the kernel point to the previous stack/frame. */
1181	and     x0, x3, #0xc
1182	mrs     x1, CurrentEL
1183	cmp     x0, x1
1184	csel	x29, x29, xzr, eq	// fp, or zero
1185	csel	x4, x2, xzr, eq		// elr, or zero
1186
1187	stp	x29, x4, [sp, #-16]!
1188	mov	x29, sp
1189
1190	add	x0, x19, #SDEI_EVENT_INTREGS
1191	mov	x1, x19
1192	bl	__sdei_handler
1193
1194	msr	sp_el0, x28
1195	/* restore regs >x17 that we clobbered */
1196	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1197	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1198	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1199	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1200	mov	sp, x1
1201
1202	mov	x1, x0			// address to complete_and_resume
1203	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1204	cmp	x0, #1
1205	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1206	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1207	csel	x0, x2, x3, ls
1208
1209	ldr_l	x2, sdei_exit_mode
1210
1211alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1212	sdei_handler_exit exit_mode=x2
1213alternative_else_nop_endif
1214
1215#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1216	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
1217	br	x5
1218#endif
1219SYM_CODE_END(__sdei_asm_handler)
1220NOKPROBE(__sdei_asm_handler)
1221#endif /* CONFIG_ARM_SDE_INTERFACE */
1222