• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 * Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <stdbool.h>
#include <string.h>

#include <arch_helpers.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/psci/psci.h>
#include <lib/utils.h>
#include <plat/arm/common/arm_sip_svc.h>
#include <plat/arm/common/plat_arm.h>
#include <smccc_helpers.h>

/*
 * Handle SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; width dependent on the
 *	calling exception level.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, passed back to it upon
 *	re-entry.
 * handle:
 *	Handle to saved context.
 */
arm_execution_state_switch(unsigned int smc_fid,uint32_t pc_hi,uint32_t pc_lo,uint32_t cookie_hi,uint32_t cookie_lo,void * handle)35 int arm_execution_state_switch(unsigned int smc_fid,
36 		uint32_t pc_hi,
37 		uint32_t pc_lo,
38 		uint32_t cookie_hi,
39 		uint32_t cookie_lo,
40 		void *handle)
41 {
42 	bool caller_64, thumb = false, from_el2;
43 	unsigned int el, endianness;
44 	u_register_t spsr, pc, scr, sctlr;
45 	entry_point_info_t ep;
46 	cpu_context_t *ctx = (cpu_context_t *) handle;
47 	el3_state_t *el3_ctx = get_el3state_ctx(ctx);
48 
49 	/* Validate supplied entry point */
50 	pc = (u_register_t) (((uint64_t) pc_hi << 32) | pc_lo);
51 	if (arm_validate_ns_entrypoint(pc) != 0)
52 		goto invalid_param;
53 
54 	/* That the SMC originated from NS is already validated by the caller */
55 
56 	/*
57 	 * Disallow state switch if any of the secondaries have been brought up.
58 	 */
59 	if (psci_secondaries_brought_up() != 0)
60 		goto exec_denied;
61 
62 	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
63 	caller_64 = (GET_RW(spsr) == MODE_RW_64);
64 
65 	if (caller_64) {
66 		/*
67 		 * If the call originated from AArch64, expect 32-bit pointers when
68 		 * switching to AArch32.
69 		 */
70 		if ((pc_hi != 0U) || (cookie_hi != 0U))
71 			goto invalid_param;
72 
73 		pc = pc_lo;
74 
75 		/* Instruction state when entering AArch32 */
76 		thumb = (pc & 1U) != 0U;
77 	} else {
78 		/* Construct AArch64 PC */
79 		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
80 	}
81 
82 	/* Make sure PC is 4-byte aligned, except for Thumb */
83 	if (((pc & 0x3U) != 0U) && !thumb)
84 		goto invalid_param;
85 
86 	/*
87 	 * EL3 controls register width of the immediate lower EL only. Expect
88 	 * this request from EL2/Hyp unless:
89 	 *
90 	 * - EL2 is not implemented;
91 	 * - EL2 is implemented, but was disabled. This can be inferred from
92 	 *   SCR_EL3.HCE.
93 	 */
94 	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
95 		(GET_M32(spsr) == MODE32_hyp);
96 	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
97 	if (!from_el2) {
98 		/* The call is from NS privilege level other than HYP */
99 
100 		/*
101 		 * Disallow switching state if there's a Hypervisor in place;
102 		 * this request must be taken up with the Hypervisor instead.
103 		 */
104 		if ((scr & SCR_HCE_BIT) != 0U)
105 			goto exec_denied;
106 	}
107 
108 	/*
109 	 * Return to the caller using the same endianness. Extract
110 	 * endianness bit from the respective system control register
111 	 * directly.
112 	 */
113 	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
114 	endianness = ((sctlr & SCTLR_EE_BIT) != 0U) ? 1U : 0U;
115 
116 	/* Construct SPSR for the exception state we're about to switch to */
117 	if (caller_64) {
118 		unsigned long long impl;
119 
120 		/*
121 		 * Switching from AArch64 to AArch32. Ensure this CPU implements
122 		 * the target EL in AArch32.
123 		 */
124 		impl = from_el2 ? el_implemented(2) : el_implemented(1);
125 		if (impl != EL_IMPL_A64_A32)
126 			goto exec_denied;
127 
128 		/* Return to the equivalent AArch32 privilege level */
129 		el = from_el2 ? MODE32_hyp : MODE32_svc;
130 		spsr = SPSR_MODE32((u_register_t) el,
131 				thumb ? SPSR_T_THUMB : SPSR_T_ARM,
132 				endianness, DISABLE_ALL_EXCEPTIONS);
133 	} else {
134 		/*
135 		 * Switching from AArch32 to AArch64. Since it's not possible to
136 		 * implement an EL as AArch32-only (from which this call was
137 		 * raised), it's safe to assume AArch64 is also implemented.
138 		 */
139 		el = from_el2 ? MODE_EL2 : MODE_EL1;
140 		spsr = SPSR_64((u_register_t) el, MODE_SP_ELX,
141 				DISABLE_ALL_EXCEPTIONS);
142 	}
143 
144 	/*
145 	 * Use the context management library to re-initialize the existing
146 	 * context with the execution state flipped. Since the library takes
147 	 * entry_point_info_t pointer as the argument, construct a dummy one
148 	 * with PC, state width, endianness, security etc. appropriately set.
149 	 * Other entries in the entry point structure are irrelevant for
150 	 * purpose.
151 	 */
152 	zeromem(&ep, sizeof(ep));
153 	ep.pc = pc;
154 	ep.spsr = (uint32_t) spsr;
155 	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
156 			((unsigned int) ((endianness != 0U) ? EP_EE_BIG :
157 				EP_EE_LITTLE)
158 			 | NON_SECURE | EP_ST_DISABLE));
159 
160 	/*
161 	 * Re-initialize the system register context, and exit EL3 as if for the
162 	 * first time. State switch is effectively a soft reset of the
163 	 * calling EL.
164 	 */
165 	cm_init_my_context(&ep);
166 	cm_prepare_el3_exit(NON_SECURE);
167 
168 	/*
169 	 * State switch success. The caller of SMC wouldn't see the SMC
170 	 * returning. Instead, execution starts at the supplied entry point,
171 	 * with context pointers populated in registers 0 and 1.
172 	 */
173 	SMC_RET2(handle, cookie_hi, cookie_lo);
174 
175 invalid_param:
176 	SMC_RET1(handle, STATE_SW_E_PARAM);
177 
178 exec_denied:
179 	/* State switch denied */
180 	SMC_RET1(handle, STATE_SW_E_DENIED);
181 }
182