/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
	.if \syscall == 1
	/*
	 * Save/restore needed during syscalls.  Restore syscall arguments from
	 * the values already saved on stack during kernel_entry.
	 */
	ldp	x0, x1, [sp]
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	.endif
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

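/*
 * Vector entry stub: reserve the pt_regs frame on the stack and branch to
 * the matching el<el>_<label> handler. With CONFIG_UNMAP_KERNEL_AT_EL0,
 * entries from EL0 first recover the x30 that the trampoline stashed in
 * tpidrro_el0 (or simply zero x30 for AArch32 tasks).
 */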
	.macro kernel_ventry, el, label, regsize = 64
	.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
	b	el\()\el\()_\label
	.endm

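/*
 * Compute the trampoline alias of \sym, i.e. its address within the
 * TRAMP_VALIAS mapping of .entry.tramp.text.
 */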
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

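/*
 * Save the exception context (x0-x29, lr, sp, elr_el1, spsr_el1) into the
 * pt_regs frame that kernel_ventry reserved on the kernel stack.
 */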
	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	mrs	x21, sp_el0
	mov	tsk, sp
	and	tsk, tsk, #~(THREAD_SIZE - 1)	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	get_thread_info tsk
	/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
	ldr	x20, [tsk, #TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #TASK_SIZE_64
	str	x20, [tsk, #TI_ADDR_LIMIT]
	ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

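/*
 * Restore the saved exception context and return from the exception.
 * Returns to EL0 may go via the exception trampoline when the kernel is
 * unmapped at EL0 (CONFIG_UNMAP_KERNEL_AT_EL0).
 */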
	.macro	kernel_exit, el
	.if	\el != 0
	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes are for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	4f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
4:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	adrp	x1, handle_arch_irq
	ldr	x1, [x1, #:lo12:handle_arch_irq]
	mov	x0, sp
	blr	x1
	.endm

	.text

/*
 * Exception vectors.
 */

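/*
 * The table has four groups of four entries: current EL with SP_EL0,
 * current EL with SP_ELx, lower EL using AArch64 and lower EL using
 * AArch32, with Synchronous/IRQ/FIQ/SError vectors spaced 0x80 bytes
 * apart within each group.
 */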
	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error_invalid		// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
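/*
 * Kernel preemption helper for el1_irq: stash lr in x24, then keep calling
 * preempt_schedule_irq() until TIF_NEED_RESCHED is clear again.
 */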
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get the new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov     sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x26, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_dbg
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	and	x9, x9, #~(THREAD_SIZE - 1)
	msr	sp_el0, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	str	x0, [sp, #S_X0]			// returned x0
	ldr	x1, [tsk, #TI_FLAGS]		// re-check for syscall tracing
	and	x2, x1, #_TIF_SYSCALL_WORK
	cbnz	x2, ret_fast_syscall_trace
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
	kernel_exit 0
ret_fast_syscall_trace:
	enable_irq				// enable interrupts
	b	__sys_trace_return_skipped	// we already saved x0

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
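 *
 * For a kernel thread, x19 holds the thread function and x20 its argument
 * (set up by copy_thread()); for a user task x19 is zero and we go
 * straight to ret_to_user.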
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
	enable_dbg_and_irq
	ct_user_exit 1

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	cmp     scno, sc_nr                     // check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine
	b	ret_fast_syscall
ni_sys:
	mov	x0, sp
	bl	do_ni_syscall
	b	ret_fast_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	w0, #-1				// set default errno for
	cmp     scno, x0			// user-issued syscall(-1)
	b.ne	1f
	mov	x0, #-ENOSYS
	str	x0, [sp, #S_X0]
1:	mov	x0, sp
	bl	syscall_trace_enter
	cmp	w0, #-1				// skip the syscall?
	b.eq	__sys_trace_return_skipped
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	__ni_sys_trace
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	blr	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp, #S_X0]			// save returned x0
__sys_trace_return_skipped:
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user

__ni_sys_trace:
	mov	x0, sp
	bl	do_ni_syscall
	b	__sys_trace_return

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

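/*
 * Point TTBR1_EL1 back at the kernel (swapper) page tables and switch to
 * the kernel ASID.
 */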
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_ARCH_MSM8996
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
#endif /* CONFIG_ARCH_MSM8996 */
	.endm

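/*
 * Switch TTBR1_EL1 back to the trampoline page tables and select the user
 * ASID before returning to userspace.
 */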
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because the
	 * user and kernel ASIDs don't have conflicting mappings, so any
	 * "blessing" as described in:
	 *
	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
	 *
	 * will not hurt correctness. Whilst this may partially defeat the
	 * point of using split ASIDs in the first place, it avoids
	 * the hit of invalidating the entire I-cache on every return to
	 * userspace.
	 */
	.endm

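/*
 * Trampoline vector entry: stash x30 in tpidrro_el0, map the kernel, point
 * vbar_el1 at the real vectors and branch to the matching entry there.
 */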
	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
#ifndef CONFIG_ARCH_MSM8996
	isb
#endif
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

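/*
 * Trampoline exit: point vbar_el1 back at the trampoline vectors, unmap
 * the kernel, recover x30 from far_el1 (64-bit tasks only) and eret.
 */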
	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	.endm

	.align	11
ENTRY(tramp_vectors)
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
	mov	x0, sp
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
