/*
 * Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __EL3_COMMON_MACROS_S__
#define __EL3_COMMON_MACROS_S__

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common _exception_vectors
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 *  or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
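	/* Note: assuming the usual arch.h bit positions (SCTLR_A_BIT = bit 1,
	 * SCTLR_I_BIT = bit 12), the mask loaded below is 0x1002. */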
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
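	/* Both VBAR (Secure PL1 vector base) and MVBAR (Monitor mode vector
	 * base) are pointed at the same vector table below. */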
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 *  Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable asynchronous data aborts now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow access to Advanced SIMD and floating
	 *  point features from both Security states.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 *  to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow full access from PL0 and PL1 to
	 *  floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 *  from all exception levels.
	 * ---------------------------------------------------------------------
	 */
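	/* FPEXC belongs to the Advanced SIMD and floating-point register set,
	 * so it is written with vmsr rather than a CP15 stcopr access. */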
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 *  Secure EL1 are disabled.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE))
	stcopr	r0, SDCR

	.endm
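
	/*
	 * Illustrative use only (a sketch; "my_vectors" is an example symbol,
	 * not defined in this file):
	 *	el3_arch_init_common my_vectors
	 * Within this file, the macro is invoked by el3_entrypoint_common
	 * below.
	 */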

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter enables or disables one of
 * these actions.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be set to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 *
 * An illustrative invocation is sketched after the macro definition below.
 * -----------------------------------------------------------------------------
 */
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions taken to PL1 are
		 *  taken in A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 * -------------------------------------------------------------
		 */
		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | SCTLR_V_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero then
		 * this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common \_exception_vectors

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#ifdef IMAGE_BL32
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the BL32 (SP_MIN) image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -----------------------------------------------------------------
		 */
		ldr	r0, =__RW_START__
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif /* IMAGE_BL32 */

		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_SIZE__
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
		bl	zeromem
#endif

#ifdef IMAGE_BL1
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_SIZE__
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm
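
/*
 * Illustrative invocation (a sketch only; the build options and the
 * "sp_min_vector_table" symbol are examples of how an image entrypoint
 * might use this macro, not definitions provided by this file):
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS	\
 *		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU	\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=sp_min_vector_table
 */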

#endif /* __EL3_COMMON_MACROS_S__ */