/*
 * Copyright (c) 2023 Institute of Parallel And Distributed Systems (IPADS), Shanghai Jiao Tong University (SJTU)
 * Licensed under the Mulan PSL v2.
 * You can use this software according to the terms and conditions of the Mulan PSL v2.
 * You may obtain a copy of Mulan PSL v2 at:
 *     http://license.coscl.org.cn/MulanPSL2
 * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
 * PURPOSE.
 * See the Mulan PSL v2 for more details.
 */
#include <common/asm.h>
#include <common/debug.h>
#include <common/vars.h>
#include <arch/machine/registers.h>
#include <arch/machine/esr.h>
#include <arch/machine/smp.h>

#include "irq_entry.h"

.extern syscall_table
.extern hook_syscall
.extern finish_switch
.extern do_pending_resched

.macro	exception_entry	label
	/* Each entry of the exception table should be 0x80 aligned */
	.align	7
	b	\label
.endm
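/*
 * .align 7 pads to a 2^7 = 0x80-byte boundary, matching the fixed
 * 0x80-byte slot size of the AArch64 vector table below; each slot
 * holds only a branch so the real handlers can exceed 32 instructions.
 */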

/* See registers.h for more details about the offsets (biases) used below. */
.macro	exception_enter
	sub	sp, sp, #ARCH_EXEC_CONT_SIZE
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
	mrs	x21, sp_el0
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	x30, x21, [sp, #16 * 15]
	stp	x22, x23, [sp, #16 * 16]
.endm
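/*
 * Resulting saved-context layout, as offsets from the new sp
 * (ARCH_EXEC_CONT_SIZE is presumably 17 * 16 = 272 bytes; registers.h
 * holds the authoritative definition):
 *	[sp, #16 * 0] .. [sp, #16 * 14]		x0/x1 .. x28/x29, in pairs
 *	[sp, #16 * 15]				x30 (lr), SP_EL0
 *	[sp, #16 * 16]				ELR_EL1, SPSR_EL1
 */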

.macro	exception_exit
	ldp	x22, x23, [sp, #16 * 16]
	ldp	x30, x21, [sp, #16 * 15]
	msr	sp_el0, x21
	msr	elr_el1, x22
	msr	spsr_el1, x23
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	add	sp, sp, #ARCH_EXEC_CONT_SIZE
	eret
.endm
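/*
 * eret consumes the ELR_EL1/SPSR_EL1 values rewritten by the msr
 * instructions above: it restores PSTATE from SPSR_EL1 and jumps to
 * ELR_EL1, returning to wherever the exception was taken from.
 */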

.macro switch_to_cpu_stack
	mrs	x24, TPIDR_EL1
	add	x24, x24, #OFFSET_LOCAL_CPU_STACK
	ldr	x24, [x24]
	mov	sp, x24
.endm
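/*
 * Assumes TPIDR_EL1 points to this CPU's per-CPU data area (set up at
 * boot); OFFSET_LOCAL_CPU_STACK, presumably defined in smp.h, locates
 * the field holding the top of the CPU's dedicated kernel stack.
 */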

.macro switch_to_thread_ctx
	mrs	x24, TPIDR_EL1
	add	x24, x24, #OFFSET_CURRENT_EXEC_CTX
	ldr	x24, [x24]
	mov	sp, x24
.endm
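/*
 * Symmetric to the above: OFFSET_CURRENT_EXEC_CTX presumably locates
 * the pointer to the current thread's saved execution context, so a
 * subsequent exception_exit pops from that context instead of the
 * CPU stack.
 */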

/*
 * Vector table offsets from the vector table base address (see the ARMv8 manual):
 *	Address		|	Exception Type		| 	Description
 * ============================================================================
 *	VBAR_Eln+0x000	|	 Synchronous		|	 SPSel=0
 * 		+0x080	|	  IRQ/vIRQ		|	Current EL
 *		+0x100	|	  FIQ/vFIQ		|   with Stack Pointer
 * 		+0x180	|	SError/vSError		|    shared with EL0
 * ============================================================================
 *	VBAR_Eln+0x200	|	 Synchronous		|	 SPSel=1
 * 		+0x280	|	  IRQ/vIRQ		|	Current EL
 *		+0x300	|	  FIQ/vFIQ		|   with dedicated
 * 		+0x380	|	SError/vSError		|    Stack Pointer
 * ============================================================================
 *	VBAR_Eln+0x400	|	 Synchronous		|
 * 		+0x480	|	  IRQ/vIRQ		|	Lower EL
 *		+0x500	|	  FIQ/vFIQ		|    using AArch64
 * 		+0x580	|	SError/vSError		|
 * ============================================================================
 *	VBAR_Eln+0x600	|	 Synchronous		|
 * 		+0x680	|	  IRQ/vIRQ		|     	Lower EL
 *		+0x700	|	  FIQ/vFIQ		|    using AArch32
 * 		+0x780	|	SError/vSError		|
 * ============================================================================
 */
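/*
 * Illustrative sketch only (the actual setup lives elsewhere): making
 * this table live on a CPU amounts to something like
 *	adrp	x0, el1_vector
 *	add	x0, x0, :lo12:el1_vector
 *	msr	vbar_el1, x0
 *	isb
 */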

/*
 * el1_vector should be installed in VBAR_EL1. The low 11 bits of VBAR_EL1
 * are reserved (RES0), hence the 2^11-byte alignment below.
 */
.align	11
EXPORT(el1_vector)
	exception_entry	sync_el1t		// Synchronous EL1t
	exception_entry	irq_el1t		// IRQ EL1t
	exception_entry	fiq_el1t		// FIQ EL1t
	exception_entry	error_el1t		// Error EL1t

	exception_entry	sync_el1h		// Synchronous EL1h
	exception_entry	irq_el1h		// IRQ EL1h
	exception_entry	fiq_el1h		// FIQ EL1h
	exception_entry	error_el1h		// Error EL1h

	exception_entry	sync_el0_64		// Synchronous 64-bit EL0
	exception_entry	irq_el0_64		// IRQ 64-bit EL0
	exception_entry	fiq_el0_64		// FIQ 64-bit EL0
	exception_entry	error_el0_64		// Error 64-bit EL0

	exception_entry	sync_el0_32		// Synchronous 32-bit EL0
	exception_entry	irq_el0_32		// IRQ 32-bit EL0
	exception_entry	fiq_el0_32		// FIQ 32-bit EL0
	exception_entry	error_el0_32		// Error 32-bit EL0

/*
 * The selected stack pointer is indicated by a suffix on the exception level:
 *  - t: SP_EL0 is used
 *  - h: SP_ELx is used
 *
 * ChCore does not enable or handle irq_el1t, fiq_xxx, and error_xxx.
 * The SPSR_EL1 of idle threads is set to 0b0101, which means interrupts
 * are enabled during their execution and SP_EL1 is selected (h).
 * Thus, irq_el1h is enabled and handled.
 *
 * Similarly, sync_el1t is not enabled either; we simply reuse the handler
 * for sync_el0 to handle sync_el1h (e.g., a page fault during copy_to_user,
 * or an FPU trap).
 */
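/*
 * Decoding SPSR_EL1 = 0b0101: M[3:0] = 0b0101 selects AArch64 EL1h
 * (EL1 with SP_EL1), and the DAIF mask bits [9:6] are all zero, so
 * IRQs remain unmasked once the idle thread is entered via eret.
 */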

irq_el1h:
	/* Simply reusing exception_enter/exit is OK. */
	exception_enter
#ifndef CHCORE_KERNEL_RT
	switch_to_cpu_stack
#endif
	bl	handle_irq_el1
	/* should never reach here */
	b .
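	/*
	 * handle_irq_el1 is expected not to return here: it presumably
	 * erets into a chosen thread context (e.g., via __eret_to_thread
	 * below), which is why the guard branch above should be dead code.
	 */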

irq_el1t:
fiq_el1t:
fiq_el1h:
error_el1t:
error_el1h:
sync_el1t:
	bl unexpected_handler

sync_el1h:
	exception_enter
	mov	x0, #SYNC_EL1h
	mrs	x1, esr_el1
	mrs	x2, elr_el1
	bl	handle_entry_c
	str	x0, [sp, #16 * 16] /* store the return value as the new ELR_EL1 */
	exception_exit
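	/*
	 * Overwriting the saved ELR_EL1 slot lets handle_entry_c choose
	 * where the eret lands, e.g., to skip or retry the instruction
	 * that triggered the exception.
	 */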

sync_el0_64:
	exception_enter
#ifndef CHCORE_KERNEL_RT
	switch_to_cpu_stack
#endif
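	/*
	 * ESR_EL1[31:26] is the exception class (EC); ESR_EL1_EC_SHIFT is
	 * presumably 26. An SVC from AArch64 has EC = 0b010101 (0x15) and
	 * takes the syscall path; all other causes go to handle_entry_c.
	 */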
	mrs	x25, esr_el1
	lsr	x24, x25, #ESR_EL1_EC_SHIFT
	cmp	x24, #ESR_EL1_EC_SVC_64
	b.eq	el0_syscall
	mov	x0, #SYNC_EL0_64
	mrs	x1, esr_el1
	mrs	x2, elr_el1
	bl	handle_entry_c
#ifdef CHCORE_KERNEL_RT
	bl	do_pending_resched
#else
	switch_to_thread_ctx
#endif
	exception_exit

el0_syscall:

/* Syscall hooking: eases tracing and debugging. */
#if ENABLE_HOOKING_SYSCALL == ON
	/* Preserve the syscall argument registers around the hook call. */
	sub	sp, sp, #16 * 8
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]

	mov	x0, x8
	bl	hook_syscall
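	/*
	 * The hook receives the syscall number (x8, moved into x0 above) as
	 * its first argument; x0-x15 are saved above and restored below
	 * because the C hook may clobber caller-saved registers.
	 */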

	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	add	sp, sp, #16 * 8
#endif

	adr	x27, syscall_table		// syscall table in x27
	uxtw	x16, w8				// syscall number in x16
	ldr	x16, [x27, x16, lsl #3]		// find the syscall entry
	blr	x16
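	/*
	 * Illustrative C equivalent of the dispatch above (sketch only,
	 * with the arguments already sitting in x0-x7):
	 *	ret = ((long (*)(void))syscall_table[(u32)x8])();
	 * uxtw zero-extends the 32-bit syscall number in w8, and lsl #3
	 * scales it by 8, the size of each function-pointer entry.
	 */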

	/* Ret from syscall */
	// bl	disable_irq
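	/*
	 * Either way below, the syscall return value in x0 is written into
	 * the saved x0 slot (offset 0) of the thread context, so that
	 * exception_exit delivers it back to user space.
	 */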
#ifdef CHCORE_KERNEL_RT
	str	x0, [sp]
	bl	do_pending_resched
#else
	switch_to_thread_ctx
	str	x0, [sp]
#endif
	exception_exit

irq_el0_64:
	exception_enter
#ifndef CHCORE_KERNEL_RT
	switch_to_cpu_stack
#endif
	bl	handle_irq
	/* should never reach here */
	b .

error_el0_64:
#ifndef CHCORE_OH_TEE
fiq_el0_64:
#endif /* CHCORE_OH_TEE */
sync_el0_32:
irq_el0_32:
fiq_el0_32:
error_el0_32:
	bl unexpected_handler

#ifdef CHCORE_OH_TEE
fiq_el0_64:
	exception_enter
#ifndef CHCORE_KERNEL_RT
	switch_to_cpu_stack
#endif
	bl	handle_fiq
	/* should never reach here */
	b .
#endif /* CHCORE_OH_TEE */

/* void eret_to_thread(u64 sp) */
BEGIN_FUNC(__eret_to_thread)
	mov	sp, x0
	dmb	ish /* smp_mb() */
#ifdef CHCORE_KERNEL_RT
	bl	finish_switch
#endif
	exception_exit
END_FUNC(__eret_to_thread)
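/*
 * Hypothetical call site for illustration: the scheduler passes the
 * kernel address of the chosen thread's saved execution context, e.g.
 * eret_to_thread(switch_context()), and the call never returns.
 */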