/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 * Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Dispatch synchronous system register traps from lower ELs.
 */

#include <arch_features.h>
#include <arch_helpers.h>
#include <bl31/sync_handle.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>

int handle_sysreg_trap(uint64_t esr_el3, cpu_context_t *ctx)
{
	uint64_t __unused opcode = esr_el3 & ISS_SYSREG_OPCODE_MASK;

#if ENABLE_FEAT_RNG_TRAP
	if ((opcode == ISS_SYSREG_OPCODE_RNDR) || (opcode == ISS_SYSREG_OPCODE_RNDRRS)) {
		return plat_handle_rng_trap(esr_el3, ctx);
	}
#endif

#if IMPDEF_SYSREG_TRAP
	if ((opcode & ISS_SYSREG_OPCODE_IMPDEF) == ISS_SYSREG_OPCODE_IMPDEF) {
		return plat_handle_impdef_trap(esr_el3, ctx);
	}
#endif

	return TRAP_RET_UNHANDLED;
}

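/*
 * Platforms built with ENABLE_FEAT_RNG_TRAP must provide
 * plat_handle_rng_trap() to emulate RNDR/RNDRRS. The sketch below is
 * illustrative only: it assumes the get_sysreg_iss_rt() and
 * is_sysreg_iss_write() helpers from sync_handle.h and a hypothetical
 * plat_get_entropy() backend.
 *
 *	int plat_handle_rng_trap(uint64_t esr_el3, cpu_context_t *ctx)
 *	{
 *		uint64_t entropy;
 *		// Rt in the ESR_EL3 ISS names the instruction's target register.
 *		unsigned int rt = get_sysreg_iss_rt(esr_el3);
 *
 *		// RNDR/RNDRRS are reads; ignore writes and XZR destinations.
 *		if ((rt == 31U) || is_sysreg_iss_write(esr_el3) ||
 *		    !plat_get_entropy(&entropy)) {
 *			return TRAP_RET_UNHANDLED;
 *		}
 *
 *		// Patch the saved x<rt> in the GP register context
 *		// (CTX_GPREG_* offsets are 8 bytes apart).
 *		write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0 + (rt << 3),
 *			      entropy);
 *		return TRAP_RET_CONTINUE; // resume after the instruction
 *	}
 */
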
static bool is_tge_enabled(void)
{
	u_register_t hcr_el2 = read_hcr_el2();

	return ((is_feat_vhe_present()) && ((hcr_el2 & HCR_TGE_BIT) != 0U));
}

/*
 * This function ensures that an Undefined exception is never injected
 * into a non-existent S-EL2. That could otherwise happen when the trap
 * originates from S-EL{1,0} while the non-secure world runs with the TGE
 * bit set: EL3 does not save/restore EL2 registers when only one world
 * has EL2 enabled, so reading HCR_EL2.TGE here would return the
 * non-secure world's value.
 */
static bool is_secure_trap_without_sel2(u_register_t scr)
{
	return ((scr & (SCR_NS_BIT | SCR_EEL2_BIT)) == 0);
}

static unsigned int target_el(unsigned int from_el, u_register_t scr)
{
	if (from_el > MODE_EL1) {
		return from_el;
	} else if (is_tge_enabled() && !is_secure_trap_without_sel2(scr)) {
		return MODE_EL2;
	} else {
		return MODE_EL1;
	}
}

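/*
 * Illustrative outcomes of the routing decision above:
 *  - trap taken from EL2                 -> injected back into EL2
 *  - trap from NS EL{1,0} with TGE set   -> injected into EL2 (host OS)
 *  - trap from S-EL{1,0} without S-EL2   -> injected into S-EL1, even if
 *    the HCR_EL2.TGE bit read here holds the non-secure world's value
 */
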
static u_register_t get_elr_el3(u_register_t spsr_el3, u_register_t vbar, unsigned int target_el)
{
	unsigned int outgoing_el = GET_EL(spsr_el3);
	u_register_t elr_el3 = 0;

	if (outgoing_el == target_el) {
		/*
		 * Target EL is either EL1 or EL2; the LSB of SPSR.M tells
		 * us the SP selection (SPSel):
		 *  Thread mode  : 0
		 *  Handler mode : 1
		 */
		if ((spsr_el3 & (MODE_SP_MASK << MODE_SP_SHIFT)) == MODE_SP_ELX) {
			elr_el3 = vbar + CURRENT_EL_SPX;
		} else {
			elr_el3 = vbar + CURRENT_EL_SP0;
		}
	} else {
		/* Vector address for lower EL using AArch64 */
		elr_el3 = vbar + LOWER_EL_AARCH64;
	}

	return elr_el3;
}

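/*
 * Worked example, per the architectural vector table layout: an exception
 * injected from EL1 back into EL1 in handler mode vectors to
 * VBAR_EL1 + 0x200 (current EL with SP_ELx), while one injected from EL0
 * into EL1 vectors to VBAR_EL1 + 0x400 (lower EL using AArch64).
 */
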
/*
 * Explicitly create all bits of SPSR to get PSTATE at exception return.
 *
 * The code is based on "Aarch64.exceptions.takeexception" described in
 * DDI0602 revision 2023-06:
 * "https://developer.arm.com/documentation/ddi0602/2023-06/Shared-Pseudocode/
 * aarch64-exceptions-takeexception"
 *
 * NOTE: This piece of code must be reviewed every release to ensure that
 * we keep up with new architecture features that introduce new SPSR bits.
 *
 * TF-A 2.12 release review:
 * The latest version available is 2024-09, which adds two features that
 * impact the generation of SPSR. Since these features are not implemented
 * in TF-A at the time of release, just log the feature names here so they
 * can be taken up when feature support is introduced:
 *  - FEAT_PAuth_LR (2023 extension)
 *  - FEAT_UINJ (2024 extension)
 */
u_register_t create_spsr(u_register_t old_spsr, unsigned int target_el)
{
	u_register_t new_spsr = 0;
	u_register_t sctlr;

	/* Set M bits for target EL in AArch64 mode, also get sctlr */
	if (target_el == MODE_EL2) {
		sctlr = read_sctlr_el2();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL2H;
	} else {
		sctlr = read_sctlr_el1();
		new_spsr |= (SPSR_M_AARCH64 << SPSR_M_SHIFT) | SPSR_M_EL1H;
	}

	/* Mask all exceptions, update DAIF bits */
	new_spsr |= SPSR_DAIF_MASK << SPSR_DAIF_SHIFT;

	/* If FEAT_BTI is present, clear BTYPE bits */
	new_spsr |= old_spsr & (SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	if (is_feat_bti_present()) {
		new_spsr &= ~(SPSR_BTYPE_MASK_AARCH64 << SPSR_BTYPE_SHIFT_AARCH64);
	}

	/* If SSBS is implemented, take the value from SCTLR.DSSBS */
	new_spsr |= old_spsr & SPSR_SSBS_BIT_AARCH64;
	if (is_feat_ssbs_present()) {
		if ((sctlr & SCTLR_DSSBS_BIT) != 0U) {
			new_spsr |= SPSR_SSBS_BIT_AARCH64;
		} else {
			new_spsr &= ~SPSR_SSBS_BIT_AARCH64;
		}
	}

	/* If FEAT_NMI is implemented, ALLINT = !(SCTLR.SPINTMASK) */
	new_spsr |= old_spsr & SPSR_ALLINT_BIT_AARCH64;
	if (is_feat_nmi_present()) {
		if ((sctlr & SCTLR_SPINTMASK_BIT) != 0U) {
			new_spsr &= ~SPSR_ALLINT_BIT_AARCH64;
		} else {
			new_spsr |= SPSR_ALLINT_BIT_AARCH64;
		}
	}

	/* Clear PSTATE.IL bit explicitly */
	new_spsr &= ~SPSR_IL_BIT;

	/* Clear PSTATE.SS bit explicitly */
	new_spsr &= ~SPSR_SS_BIT;

	/* Update PSTATE.PAN bit */
	new_spsr |= old_spsr & SPSR_PAN_BIT;
	if (is_feat_pan_present() &&
	    ((target_el == MODE_EL1) || ((target_el == MODE_EL2) && is_tge_enabled())) &&
	    ((sctlr & SCTLR_SPAN_BIT) == 0U)) {
		new_spsr |= SPSR_PAN_BIT;
	}

	/* Clear UAO bit if FEAT_UAO is present */
	new_spsr |= old_spsr & SPSR_UAO_BIT_AARCH64;
	if (is_feat_uao_present()) {
		new_spsr &= ~SPSR_UAO_BIT_AARCH64;
	}

	/* DIT bits are unchanged */
	new_spsr |= old_spsr & SPSR_DIT_BIT;

	/* If FEAT_MTE2 is implemented, mask tag faults by setting the TCO bit */
	new_spsr |= old_spsr & SPSR_TCO_BIT_AARCH64;
	if (is_feat_mte2_present()) {
		new_spsr |= SPSR_TCO_BIT_AARCH64;
	}

	/* NZCV bits are unchanged */
	new_spsr |= old_spsr & SPSR_NZCV;

	/* If FEAT_EBEP is present, set the PM bit */
	new_spsr |= old_spsr & SPSR_PM_BIT_AARCH64;
	if (is_feat_ebep_present()) {
		new_spsr |= SPSR_PM_BIT_AARCH64;
	}

	/* If FEAT_SEBEP is present, clear the PPEND bit */
	new_spsr |= old_spsr & SPSR_PPEND_BIT;
	if (is_feat_sebep_present()) {
		new_spsr &= ~SPSR_PPEND_BIT;
	}

	/* If FEAT_GCS is present, update the EXLOCK bit */
	new_spsr |= old_spsr & SPSR_EXLOCK_BIT_AARCH64;
	if (is_feat_gcs_present()) {
		u_register_t gcscr;
		if (target_el == MODE_EL2) {
			gcscr = read_gcscr_el2();
		} else {
			gcscr = read_gcscr_el1();
		}
		new_spsr |= (gcscr & GCSCR_EXLOCK_EN_BIT) ? SPSR_EXLOCK_BIT_AARCH64 : 0;
	}

	return new_spsr;
}

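/*
 * Worked example, assuming none of the optional features above are
 * implemented and the preserved old_spsr bits are zero: for an injection
 * into EL1, new_spsr = SPSR_M_EL1H (0x5) | DAIF mask (0xf << 6) = 0x3c5,
 * i.e. EL1 handler mode with all exceptions masked.
 */
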
/*
 * Handler for injecting an Undefined exception into a lower EL when that
 * lower EL accesses a system register the (possibly older) EL3 firmware
 * is unaware of.
 *
 * This is a safety net to avoid EL3 panics caused by system register
 * accesses that trigger an exception with syndrome EC=0x18.
 */
void inject_undef64(cpu_context_t *ctx)
{
	u_register_t esr = (EC_UNKNOWN << ESR_EC_SHIFT) | ESR_IL_BIT;
	el3_state_t *state = get_el3state_ctx(ctx);
	u_register_t elr_el3 = read_ctx_reg(state, CTX_ELR_EL3);
	u_register_t old_spsr = read_ctx_reg(state, CTX_SPSR_EL3);
	u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
	u_register_t new_spsr = 0;
	unsigned int to_el = target_el(GET_EL(old_spsr), scr_el3);

	if (to_el == MODE_EL2) {
		write_elr_el2(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el2(), to_el);
		write_esr_el2(esr);
		write_spsr_el2(old_spsr);
	} else {
		write_elr_el1(elr_el3);
		elr_el3 = get_elr_el3(old_spsr, read_vbar_el1(), to_el);
		write_esr_el1(esr);
		write_spsr_el1(old_spsr);
	}

	new_spsr = create_spsr(old_spsr, to_el);

	write_ctx_reg(state, CTX_SPSR_EL3, new_spsr);
	write_ctx_reg(state, CTX_ELR_EL3, elr_el3);
}
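
/*
 * For reference, the EL3 vector code that calls handle_sysreg_trap()
 * consumes its return value roughly as follows (illustrative C; the real
 * caller lives in the BL31 exception vector code):
 *
 *	int ret = handle_sysreg_trap(esr_el3, ctx);
 *
 *	if (ret == TRAP_RET_CONTINUE) {
 *		// emulated: advance the saved ELR_EL3 past the instruction
 *		el3_state_t *state = get_el3state_ctx(ctx);
 *		write_ctx_reg(state, CTX_ELR_EL3,
 *			      read_ctx_reg(state, CTX_ELR_EL3) + 4U);
 *	} else if (ret == TRAP_RET_UNHANDLED) {
 *		// not recognised: fall back to undef injection (or panic)
 *		inject_undef64(ctx);
 *	}
 *	// TRAP_RET_REPEAT: leave ELR_EL3 alone to re-execute the instruction
 */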