/*
 * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 *  or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 *  Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable asynchronous data aborts now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow access to Advanced SIMD and floating
	 *  point features from both Security states.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 *  to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow full access from PL0 and PL1 to
	 *  floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for the field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 *  from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by soft-float toolchains.
	 *  ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
	 *  hard-float toolchain variants; avoid compiling the code below with
	 *  a soft-float toolchain, as the "vmsr" instruction would not be
	 *  recognised.
	 * ---------------------------------------------------------------------
	 */
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 *  Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 *  in Secure state. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | SDCR_SCCD_BIT)
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that event counter overflow, which
	 *  is recorded in PMOVSCLR[0-30], occurs on the increment
	 *  that changes PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU
	 *  is implemented. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that cycle counter overflow, which
	 *  is recorded in PMOVSCLR[31], occurs on the increment
	 *  that changes PMCCNTR[63] from 1 to 0.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows some actions to be
 * enabled or disabled.
 *
 * _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 * _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 * _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 * _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 * _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 * _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 * -----------------------------------------------------------------------------
 */
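
/* -----------------------------------------------------------------------------
 * Illustrative invocation only: the parameter values below and the vector
 * table symbol (my_exception_vectors) are hypothetical; each image supplies
 * its own values from its entrypoint.
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=my_exception_vectors
 * -----------------------------------------------------------------------------
 */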
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 *  Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
				SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

#if DISABLE_MTPMU
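	/*
	 * Explanatory note: when the DISABLE_MTPMU build option is set,
	 * mtpmu_disable prohibits use of the multi-threaded PMU extension
	 * (FEAT_MTPMU) on CPUs that implement it.
	 */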
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address; if it is not zero
		 * then it is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors. VBAR holds the vector base address for
	 * the Secure PL1 modes, while MVBAR holds the Monitor mode vector
	 * base address.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor-specific actions upon reset, e.g. cache and
	 * TLB invalidations.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialise memory now. Secondary CPU initialisation won't get to
	 * this point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage.
		 * -----------------------------------------------------------------
		 */
		ldr	r0, =__RW_START__
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif

		/*
		 * zeromem clobbers r12, which holds arg3 from the previous BL
		 * stage; preserve it in r7 across the calls below.
		 */
		mov	r7, r12
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_SIZE__
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_UNALIGNED_SIZE__
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_SIZE__
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

#endif /* EL3_COMMON_MACROS_S */