/*
 * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */


#include <assert_macros.S>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <context.h>
#include <lib/extensions/ras_arch.h>
#include <cpu_macros.S>

	.globl	handle_lower_el_ea_esb
	.globl	handle_lower_el_async_ea
	.globl	enter_lower_el_sync_ea
	.globl	enter_lower_el_async_ea

/*
 * Function to delegate External Aborts synchronized by the ESB instruction at
 * EL3 vector entry. This function assumes GP registers x0-x29 have been saved,
 * and are available for use. It delegates the handling of the EA to the
 * platform handler, and returns only upon successfully handling the EA;
 * otherwise panics. On return from this function, the original exception
 * handler is expected to resume.
 */
func handle_lower_el_ea_esb
	mov	x0, #ERROR_EA_ESB
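	/*
	 * DISR_EL1 holds the syndrome of the error deferred by the earlier
	 * ESB instruction.
	 */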
	mrs	x1, DISR_EL1
	b	ea_proceed
endfunc handle_lower_el_ea_esb


/*
 * This function forms the tail end of Synchronous Exception entry from a lower
 * EL. It handles Synchronous External Aborts from a lower EL and CPU
 * Implementation Defined Exceptions; any other kind of exception is reported
 * as unhandled.
 *
 * Since it's part of the exception vector, this function doesn't expect any GP
 * registers to have been saved. It delegates the handling of the EA to the
 * platform handler, and upon successfully handling the EA, exits EL3;
 * otherwise panics.
 */
func enter_lower_el_sync_ea
	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

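	/* Extract the exception class (EC) from the syndrome register */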
	mrs	x30, esr_el3
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH

	/* Check for I/D aborts from lower EL */
	cmp	x30, #EC_IABORT_LOWER_EL
	b.eq	1f

	cmp	x30, #EC_DABORT_LOWER_EL
	b.eq	1f

	/* Save GP registers */
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	stp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	stp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
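	/*
	 * Only x0-x5 are saved, as get_cpu_ops_ptr clobbers x0-x5; they are
	 * restored at label 2 below.
	 */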

	/* Get the cpu_ops pointer */
	bl	get_cpu_ops_ptr

	/* Get the cpu_ops exception handler */
	ldr	x0, [x0, #CPU_E_HANDLER_FUNC]

	/*
	 * If the retrieved function pointer is NULL, this CPU does not have an
	 * implementation defined exception handler function.
	 */
	cbz	x0, 2f
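	/* The CPU-specific handler is called with the exception class in x1 */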
	mrs	x1, esr_el3
	ubfx	x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	blr	x0
	b	2f

1:
	/* Test for EA bit in the instruction syndrome */
	mrs	x30, esr_el3
	tbz	x30, #ESR_ISS_EABORT_EA_BIT, 3f

	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, #ERROR_EA_SYNC
	mrs	x1, esr_el3
	bl	delegate_sync_ea

	/* el3_exit assumes SP_EL0 on entry */
	msr	spsel, #MODE_SP_EL0
	b	el3_exit
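	/*
	 * No handler, or the handler returned without handling the exception:
	 * restore the GP registers saved above and report the exception.
	 */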
2:
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	ldp	x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
	ldp	x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]

3:
	/* Synchronous exceptions other than the above are reported as unhandled */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	no_ret	report_unhandled_exception
endfunc enter_lower_el_sync_ea


/*
 * This function handles SErrors from lower ELs.
 *
 * Since it's part of the exception vector, this function doesn't expect any GP
 * registers to have been saved. It delegates the handling of the EA to the
 * platform handler, and upon successfully handling the EA, exits EL3;
 * otherwise panics.
 */
func enter_lower_el_async_ea
	/*
	 * Explicitly save x30 so as to free up a register and to enable
	 * branching.
	 */
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]

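/*
 * Alternate entry point, for callers that have already saved x30 to the
 * context.
 */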
handle_lower_el_async_ea:
	/*
	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
	 * ARMv8.5-PMU is implemented, save PMCR_EL0 and disable Cycle Counter.
	 */
	bl	save_gp_pmcr_pauth_regs

#if ENABLE_PAUTH
	/* Load and program APIAKey firmware key */
	bl	pauth_load_bl31_apiakey
#endif

	/* Setup exception class and syndrome arguments for platform handler */
	mov	x0, #ERROR_EA_ASYNC
	mrs	x1, esr_el3
	bl	delegate_async_ea

	/* el3_exit assumes SP_EL0 on entry */
	msr	spsel, #MODE_SP_EL0
	b	el3_exit
endfunc enter_lower_el_async_ea


/*
 * Prelude for Synchronous External Abort handling. This function assumes that
 * all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_sync_ea
#if RAS_EXTENSION
	/*
	 * Check for Uncontainable error type. If so, route to the platform
	 * fatal error handler rather than the generic EA one.
	 */
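	/*
	 * The SET field of the syndrome encodes the Synchronous Error Type;
	 * ERROR_STATUS_SET_UC identifies an Uncontainable error.
	 */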
	ubfx	x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
	cmp	x2, #ERROR_STATUS_SET_UC
	b.ne	1f

	/* Check fault status code */
	ubfx	x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
	cmp	x3, #SYNC_EA_FSC
	b.ne	1f

	no_ret	plat_handle_uncontainable_ea
1:
#endif

	b	ea_proceed
endfunc delegate_sync_ea


/*
 * Prelude for Asynchronous External Abort handling. This function assumes that
 * all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func delegate_async_ea
#if RAS_EXTENSION
	/*
	 * Check for Implementation Defined Syndrome. If so, skip checking the
	 * Uncontainable error type from the syndrome, as the format is
	 * unknown.
	 */
	tbnz	x1, #SERROR_IDS_BIT, 1f

	/*
	 * Check for Uncontainable error type. If so, route to the platform
	 * fatal error handler rather than the generic EA one.
	 */
	ubfx	x2, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
	cmp	x2, #ERROR_STATUS_UET_UC
	b.ne	1f

	/* Check DFSC for SError type */
	ubfx	x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
	cmp	x3, #DFSC_SERROR
	b.ne	1f

	no_ret	plat_handle_uncontainable_ea
1:
#endif

	b	ea_proceed
endfunc delegate_async_ea


/*
 * Delegate External Abort handling to the platform's EA handler. This function
 * assumes that all GP registers have been saved by the caller.
 *
 * x0: EA reason
 * x1: EA syndrome
 */
func ea_proceed
	/*
	 * If the ESR stored in the context is not zero, we were already
	 * processing an EA, and this is a double fault.
	 */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
	cbz	x5, 1f
	no_ret	plat_handle_double_fault

1:
	/* Save EL3 state */
	mrs	x2, spsr_el3
	mrs	x3, elr_el3
	stp	x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]

	/*
	 * Save ESR, as handling might involve lower ELs, and returning to EL3
	 * from there would trample the original ESR.
	 */
	mrs	x4, scr_el3
	mrs	x5, esr_el3
	stp	x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]

	/*
	 * Setup the rest of the arguments, and call the platform External
	 * Abort handler.
	 *
	 * x0: EA reason (already in place).
	 * x1: Exception syndrome (already in place).
	 * x2: Cookie (unused for now).
	 * x3: Context pointer.
	 * x4: Flags (security state from SCR for now).
	 */
	mov	x2, xzr
	mov	x3, sp
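	/* SCR_EL3.NS is bit 0; extract it as the security state flag */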
	ubfx	x4, x4, #0, #1

	/* Switch to runtime stack */
	ldr	x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
	msr	spsel, #MODE_SP_EL0
	mov	sp, x5

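	/*
	 * Preserve the return address in x29, as the call below clobbers x30;
	 * this function returns via 'ret x29' at the end.
	 */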
	mov	x29, x30
#if ENABLE_ASSERTIONS
	/* Stash the stack pointer */
	mov	x28, sp
#endif
	bl	plat_ea_handler

#if ENABLE_ASSERTIONS
	/*
	 * Error handling flows might involve long jumps, so upon returning
	 * from the platform error handler, validate that we've completely
	 * unwound the stack.
	 */
	mov	x27, sp
	cmp	x28, x27
	ASM_ASSERT(eq)
#endif

	/* Make SP point to context */
	msr	spsel, #MODE_SP_ELX

	/* Restore SPSR_EL3 and ELR_EL3 */
	ldp	x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
	msr	spsr_el3, x1
	msr	elr_el3, x2

	/* Restore SCR_EL3 and ESR_EL3 */
	ldp	x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
	msr	scr_el3, x3
	msr	esr_el3, x4

#if ENABLE_ASSERTIONS
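	/*
	 * A valid EA must carry a non-zero ESR; the double-fault check at
	 * entry relies on a zero stored ESR meaning no EA is in progress.
	 */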
	cmp	x4, xzr
	ASM_ASSERT(ne)
#endif

	/* Clear ESR storage */
	str	xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]

	ret	x29
endfunc ea_proceed