/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <drivers/console.h>
#include <lib/debugfs.h>
#include <lib/extensions/ras.h>
#include <lib/fconf/fconf.h>
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/mmio.h>
#if TRANSFER_LIST
#include <lib/transfer_list.h>
#endif
#include <lib/xlat_tables/xlat_tables_compat.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <platform_def.h>

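/*
 * Handles for the secure and non-secure transfer lists used for the firmware
 * handoff (only populated when TRANSFER_LIST is enabled).
 */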
static struct transfer_list_header *secure_tl __unused;
static struct transfer_list_header *ns_tl __unused;

/*
 * Placeholder variables for copying the arguments that have been passed to
 * BL31 from BL2.
 */
static entry_point_info_t bl32_image_ep_info;
static entry_point_info_t bl33_image_ep_info;
#if ENABLE_RME
static entry_point_info_t rmm_image_ep_info;
#endif

#if !RESET_TO_BL31
/*
 * Check that BL31_BASE is above ARM_FW_CONFIG_LIMIT (or above
 * PLAT_ARM_EL3_FW_HANDOFF_LIMIT when TRANSFER_LIST is enabled). The reserved
 * page is required for SOC_FW_CONFIG/TOS_FW_CONFIG passed from BL2.
 */
#if TRANSFER_LIST
CASSERT(BL31_BASE >= PLAT_ARM_EL3_FW_HANDOFF_LIMIT, assert_bl31_base_overflows);
#else
CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows);
#endif /* TRANSFER_LIST */
#endif /* RESET_TO_BL31 */

/* Weak definitions may be overridden in specific ARM standard platforms */
#pragma weak bl31_early_platform_setup2
#pragma weak bl31_platform_setup
#pragma weak bl31_plat_arch_setup
#pragma weak bl31_plat_get_next_image_ep_info
#pragma weak bl31_plat_runtime_setup
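
/*
 * A minimal sketch (hypothetical, for illustration only; not part of this
 * file) of how a platform port overrides one of the weak hooks above: provide
 * a strong definition that wraps the common ARM implementation, e.g.
 *
 *	void bl31_platform_setup(void)
 *	{
 *		arm_bl31_platform_setup();
 *		my_plat_extra_setup();
 *	}
 *
 * where my_plat_extra_setup() is a hypothetical platform-specific hook.
 */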

#define MAP_BL31_TOTAL		MAP_REGION_FLAT(			\
					BL31_START,			\
					BL31_END - BL31_START,		\
					MT_MEMORY | MT_RW | EL3_PAS)
#if RECLAIM_INIT_CODE
IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE);
IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED);
IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED);

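/*
 * Round the unaligned linker symbols up to the next page boundary so the
 * init code and stack regions can be remapped with page granularity.
 */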
#define	BL_INIT_CODE_END	((BL_CODE_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))
#define	BL_STACKS_END		((BL_STACKS_END_UNALIGNED + PAGE_SIZE - 1) & \
					~(PAGE_SIZE - 1))

#define MAP_BL_INIT_CODE	MAP_REGION_FLAT(			\
					BL_INIT_CODE_BASE,		\
					BL_INIT_CODE_END		\
						- BL_INIT_CODE_BASE,	\
					MT_CODE | EL3_PAS)
#endif

#if SEPARATE_NOBITS_REGION
#define MAP_BL31_NOBITS		MAP_REGION_FLAT(			\
					BL31_NOBITS_BASE,		\
					BL31_NOBITS_LIMIT		\
						- BL31_NOBITS_BASE,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#endif
/*******************************************************************************
 * Return a pointer to the 'entry_point_info' structure of the next image for
 * the security state specified. BL33 corresponds to the non-secure image type
 * while BL32 corresponds to the secure image type. A NULL pointer is returned
 * if the image does not exist.
 ******************************************************************************/
struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type)
{
	entry_point_info_t *next_image_info;

	assert(sec_state_is_valid(type));
	if (type == NON_SECURE) {
#if TRANSFER_LIST && !RESET_TO_BL31
		next_image_info = transfer_list_set_handoff_args(
			ns_tl, &bl33_image_ep_info);
#else
		next_image_info = &bl33_image_ep_info;
#endif
	}
#if ENABLE_RME
	else if (type == REALM) {
		next_image_info = &rmm_image_ep_info;
	}
#endif
	else {
		next_image_info = &bl32_image_ep_info;
	}

	/*
	 * None of the images on the ARM development platforms can have 0x0
	 * as the entrypoint
	 */
	if (next_image_info->pc)
		return next_image_info;
	else
		return NULL;
}

/*******************************************************************************
 * Perform any BL31 early platform setup common to ARM standard platforms.
 * Here is an opportunity to copy parameters passed by the calling EL (S-EL1
 * in BL2 & EL3 in BL1) before they are lost (potentially). This needs to be
 * done before the MMU is initialized so that the memory layout can be used
 * while creating page tables. BL2 has flushed this information to memory, so
 * we are guaranteed to pick up good data.
 ******************************************************************************/
#if TRANSFER_LIST
void __init arm_bl31_early_platform_setup(u_register_t arg0, u_register_t arg1,
					  u_register_t arg2, u_register_t arg3)
{
#if RESET_TO_BL31
	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info, PARAM_EP, VERSION_1, 0);
	/*
	 * Tell BL31 where the non-trusted software image is located and
	 * provide the entry state information.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

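	/*
	 * Hand-off arguments for BL33: arg0 holds the offset of the DTB within
	 * the handoff area, arg1 the transfer list signature and register
	 * convention version, and arg3 the base of the non-secure transfer
	 * list.
	 */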
	bl33_image_ep_info.args.arg0 = PLAT_ARM_TRANSFER_LIST_DTB_OFFSET;
	bl33_image_ep_info.args.arg1 =
		TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
	bl33_image_ep_info.args.arg3 = FW_NS_HANDOFF_BASE;
#else
	struct transfer_list_entry *te = NULL;
	struct entry_point_info *ep;

	secure_tl = (struct transfer_list_header *)arg3;

	/*
	 * Populate the global entry point structures used to execute subsequent
	 * images.
	 */
	while ((te = transfer_list_next(secure_tl, te)) != NULL) {
		ep = transfer_list_entry_data(te);

		if (te->tag_id == TL_TAG_EXEC_EP_INFO64) {
			switch (GET_SECURITY_STATE(ep->h.attr)) {
			case NON_SECURE:
				bl33_image_ep_info = *ep;
				break;
#if ENABLE_RME
			case REALM:
				rmm_image_ep_info = *ep;
				break;
#endif
			case SECURE:
				bl32_image_ep_info = *ep;
				break;
			default:
				ERROR("Unrecognized Image Security State %lu\n",
				      GET_SECURITY_STATE(ep->h.attr));
				panic();
			}
		}
	}
#endif /* RESET_TO_BL31 */
}
#else
void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_config,
				uintptr_t hw_config, void *plat_params_from_bl2)
{
	/* Initialize the console to provide early debug support */
	arm_console_boot_init();

#if RESET_TO_BL31
	/* There are no parameters from BL2 if BL31 is a reset vector */
	assert(from_bl2 == NULL);
	assert(plat_params_from_bl2 == NULL);

# ifdef BL32_BASE
	/* Populate entry point information for BL32 */
	SET_PARAM_HEAD(&bl32_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	SET_SECURITY_STATE(bl32_image_ep_info.h.attr, SECURE);
	bl32_image_ep_info.pc = BL32_BASE;
	bl32_image_ep_info.spsr = arm_get_spsr_for_bl32_entry();

#if defined(SPD_spmd)
	/*
	 * SPM (Hafnium in the secure world) expects the SPMC manifest base
	 * address in x0. In the !RESET_TO_BL31 case the manifest is loaded
	 * just after the base of non-shared SRAM (at a 4KB offset). In the
	 * RESET_TO_BL31 case all of the non-shared SRAM is allocated to BL31,
	 * so keep the manifest in the last page to avoid overwriting it.
	 */
	bl32_image_ep_info.args.arg0 = ARM_TRUSTED_SRAM_BASE +
				PLAT_ARM_TRUSTED_SRAM_SIZE - PAGE_SIZE;
#endif

# endif /* BL32_BASE */

	/* Populate entry point information for BL33 */
	SET_PARAM_HEAD(&bl33_image_ep_info,
				PARAM_EP,
				VERSION_1,
				0);
	/*
	 * Tell BL31 where the non-trusted software image is located and
	 * provide the entry state information.
	 */
	bl33_image_ep_info.pc = plat_get_ns_image_entrypoint();

	bl33_image_ep_info.spsr = arm_get_spsr_for_bl33_entry();
	SET_SECURITY_STATE(bl33_image_ep_info.h.attr, NON_SECURE);

#if ENABLE_RME
	/*
	 * Populate entry point information for RMM.
	 * Only PC needs to be set as other fields are determined by RMMD.
	 */
	rmm_image_ep_info.pc = RMM_BASE;
#endif /* ENABLE_RME */

#else /* RESET_TO_BL31 */

	/*
	 * In debug builds, we pass a special value in 'plat_params_from_bl2'
	 * to verify platform parameters from BL2 to BL31.
	 * In release builds, it's not used.
	 */
	assert(((unsigned long long)plat_params_from_bl2) ==
		ARM_BL31_PLAT_PARAM_VAL);

	/* Params passed from BL2 should not be NULL. */
	bl_params_t *params_from_bl2 = (bl_params_t *)from_bl2;
	assert(params_from_bl2 != NULL);
	assert(params_from_bl2->h.type == PARAM_BL_PARAMS);
	assert(params_from_bl2->h.version >= VERSION_2);

	bl_params_node_t *bl_params = params_from_bl2->head;

	/*
	 * Copy BL33, BL32 and RMM (if present) entry point information.
	 * They are stored in Secure RAM, in BL2's address space.
	 */
	while (bl_params != NULL) {
		if (bl_params->image_id == BL32_IMAGE_ID) {
			bl32_image_ep_info = *bl_params->ep_info;
#if SPMC_AT_EL3
			/*
			 * Populate the BL32 image base, size and max limit in
			 * the entry point information, since there is no
			 * platform function to retrieve them in generic
			 * code. We choose arg2, arg3 and arg4 since the generic
			 * code uses arg1 for stashing the SP manifest size. The
			 * SPMC setup uses these arguments to update the SP
			 * manifest with the actual SP's base address and its
			 * size.
			 */
			bl32_image_ep_info.args.arg2 =
				bl_params->image_info->image_base;
			bl32_image_ep_info.args.arg3 =
				bl_params->image_info->image_size;
			bl32_image_ep_info.args.arg4 =
				bl_params->image_info->image_base +
				bl_params->image_info->image_max_size;
#endif
		}
#if ENABLE_RME
		else if (bl_params->image_id == RMM_IMAGE_ID) {
			rmm_image_ep_info = *bl_params->ep_info;
		}
#endif
		else if (bl_params->image_id == BL33_IMAGE_ID) {
			bl33_image_ep_info = *bl_params->ep_info;
		}

		bl_params = bl_params->next_params_info;
	}

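	/*
	 * BL2 must have provided valid BL33 (and, with RME, RMM) entry points;
	 * otherwise there is nothing to hand off to.
	 */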
	if (bl33_image_ep_info.pc == 0U)
		panic();
#if ENABLE_RME
	if (rmm_image_ep_info.pc == 0U)
		panic();
#endif
#endif /* RESET_TO_BL31 */

# if ARM_LINUX_KERNEL_AS_BL33
	/*
	 * According to the file ``Documentation/arm64/booting.txt`` of the
	 * Linux kernel tree, Linux expects the physical address of the device
	 * tree blob (DTB) in x0, while x1-x3 are reserved for future use and
	 * must be 0.
	 * This option is also repurposed to load the Hafnium hypervisor in the
	 * normal world, which expects its manifest address in x0. The manifest
	 * is essentially the Linux DTS passed to the primary VM, extended with
	 * 'hypervisor' and 'chosen' nodes that describe the hypervisor
	 * configuration.
	 */
#if RESET_TO_BL31
	bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE;
#else
	bl33_image_ep_info.args.arg0 = (u_register_t)hw_config;
#endif
	bl33_image_ep_info.args.arg1 = 0U;
	bl33_image_ep_info.args.arg2 = 0U;
	bl33_image_ep_info.args.arg3 = 0U;
# endif
}
#endif

void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1,
		u_register_t arg2, u_register_t arg3)
{
#if TRANSFER_LIST
	arm_bl31_early_platform_setup(arg0, arg1, arg2, arg3);
#else
	arm_bl31_early_platform_setup((void *)arg0, arg1, arg2, (void *)arg3);
#endif

	/*
	 * Initialize Interconnect for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	plat_arm_interconnect_init();

	/*
	 * Enable Interconnect coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm in
	 * executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Perform any BL31 platform setup common to ARM standard platforms
 ******************************************************************************/
void arm_bl31_platform_setup(void)
{
	struct transfer_list_entry *te __unused;

#if TRANSFER_LIST && !RESET_TO_BL31
	ns_tl = transfer_list_init((void *)FW_NS_HANDOFF_BASE,
				   PLAT_ARM_FW_HANDOFF_SIZE);

	if (ns_tl == NULL) {
		ERROR("Non-secure transfer list initialisation failed!");
		panic();
	}

	te = transfer_list_find(secure_tl, TL_TAG_FDT);
	assert(te != NULL);

	/*
	 * A pre-existing assumption is that FCONF is unsupported with
	 * RESET_TO_BL2 and RESET_TO_BL31. For RESET_TO_BL31 this makes sense
	 * because there is no prior stage to load the device tree, but the
	 * reasoning for RESET_TO_BL2 is less clear. For the moment, hardware
	 * properties that would normally be derived from the DT are statically
	 * defined.
	 */
#if !RESET_TO_BL2
	fconf_populate("HW_CONFIG", (uintptr_t)transfer_list_entry_data(te));
#endif

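	/*
	 * Relay the HW_CONFIG device tree from the secure transfer list into
	 * the non-secure one so that BL33 can consume it.
	 */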
	te = transfer_list_add(ns_tl, TL_TAG_FDT, te->data_size,
			       transfer_list_entry_data(te));
	assert(te != NULL);
#endif /* TRANSFER_LIST */

	/* Initialize the GIC driver, cpu and distributor interfaces */
	plat_arm_gic_driver_init();
	plat_arm_gic_init();

#if RESET_TO_BL31
	/*
	 * Do initial security configuration to allow DRAM/device access
	 * (if earlier BL has not already done so).
	 */
	plat_arm_security_setup();

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	arm_nor_psci_do_dyn_mem_protect();
#endif /* PLAT_ARM_MEM_PROT_ADDR */

#endif /* RESET_TO_BL31 */

	/* Enable and initialize the System level generic timer */
	mmio_write_32(ARM_SYS_CNTCTL_BASE + CNTCR_OFF,
			CNTCR_FCREQ(0U) | CNTCR_EN);

	/* Allow access to the System counter timer module */
	arm_configure_sys_timer();

	/* Initialize power controller before setting up topology */
	plat_arm_pwrc_setup();

#if ENABLE_FEAT_RAS && FFH_SUPPORT
	ras_init();
#endif

#if USE_DEBUGFS
	debugfs_init();
#endif /* USE_DEBUGFS */
}

/*******************************************************************************
 * Perform any BL31 platform runtime setup prior to BL31 exit common to ARM
 * standard platforms
 ******************************************************************************/
void arm_bl31_plat_runtime_setup(void)
{
	struct transfer_list_entry *te __unused;
	/* Initialize the runtime console */
	arm_console_runtime_init();

#if TRANSFER_LIST && !RESET_TO_BL31
	/*
	 * BL31 is assumed to have added all transfer entries required by BL33
	 * at this stage. Flush the transfer list so the updated data is
	 * visible to all observers, even those accessing it with caching
	 * disabled.
	 */
	flush_dcache_range((uintptr_t)ns_tl, ns_tl->size);
#endif /* TRANSFER_LIST && !RESET_TO_BL31 */

#if RECLAIM_INIT_CODE
	arm_free_init_memory();
#endif

#if PLAT_RO_XLAT_TABLES
	arm_xlat_make_tables_readonly();
#endif
}

#if RECLAIM_INIT_CODE
/*
 * Make the memory holding the image's boot-time (init) code RW so it can be
 * reclaimed as stack space for the secondary cores, or RO where it cannot be
 * reclaimed:
 *
 *            |-------- INIT SECTION --------|
 *  -----------------------------------------
 * |  CORE 0  |  CORE 1  |  CORE 2  | EXTRA  |
 * |  STACK   |  STACK   |  STACK   | SPACE  |
 *  -----------------------------------------
 *             <-------------------> <------>
 *                MAKE RW AND XN       MAKE
 *                  FOR STACKS       RO AND XN
 */
void arm_free_init_memory(void)
{
	int ret = 0;

	if (BL_STACKS_END < BL_INIT_CODE_END) {
		/* Reclaim some of the init section as stack if possible. */
		if (BL_INIT_CODE_BASE < BL_STACKS_END) {
			ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
					BL_STACKS_END - BL_INIT_CODE_BASE,
					MT_RW_DATA);
		}
		/* Make the rest of the init section read-only. */
		ret |= xlat_change_mem_attributes(BL_STACKS_END,
				BL_INIT_CODE_END - BL_STACKS_END,
				MT_RO_DATA);
	} else {
		/* The stacks cover the init section, so reclaim it all. */
		ret |= xlat_change_mem_attributes(BL_INIT_CODE_BASE,
				BL_INIT_CODE_END - BL_INIT_CODE_BASE,
				MT_RW_DATA);
	}

	if (ret != 0) {
		ERROR("Could not reclaim initialization code");
		panic();
	}
}
#endif

void __init bl31_platform_setup(void)
{
	arm_bl31_platform_setup();
}

void bl31_plat_runtime_setup(void)
{
	arm_bl31_plat_runtime_setup();
}

/*******************************************************************************
 * Perform the very early platform specific architectural setup shared between
 * ARM standard platforms. This only does basic initialization. Later
 * architectural setup (bl31_arch_setup()) does not do anything platform
 * specific.
 ******************************************************************************/
void __init arm_bl31_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL31_TOTAL,
#if ENABLE_RME
		ARM_MAP_L0_GPT_REGION,
#endif
#if RECLAIM_INIT_CODE
		MAP_BL_INIT_CODE,
#endif
#if SEPARATE_NOBITS_REGION
		MAP_BL31_NOBITS,
#endif
		ARM_MAP_BL_RO,
#if USE_ROMLIB
		ARM_MAP_ROMLIB_CODE,
		ARM_MAP_ROMLIB_DATA,
#endif
#if USE_COHERENT_MEM
		ARM_MAP_BL_COHERENT_RAM,
#endif
		{0}
	};

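	/*
	 * Combine the common BL31 regions above with the platform's memory
	 * map and build the EL3 translation tables before turning on the MMU.
	 */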
	setup_page_tables(bl_regions, plat_arm_get_mmap());

	enable_mmu_el3(0);

#if ENABLE_RME
#if RESET_TO_BL31
	/* Initialize GPT only when RME is enabled. */
	assert(is_feat_rme_present());

	/* Initialise and enable granule protection after MMU. */
	arm_gpt_setup();
#endif /* RESET_TO_BL31 */
	/*
	 * Initialise Granule Protection library and enable GPC for the primary
	 * processor. The tables have already been initialized by a previous BL
	 * stage, so there is no need to provide any PAS here. This function
	 * sets up pointers to those tables.
	 */
	if (gpt_runtime_init() < 0) {
		ERROR("gpt_runtime_init() failed!\n");
		panic();
	}
#endif /* ENABLE_RME */

	arm_setup_romlib();
}

void __init bl31_plat_arch_setup(void)
{
	arm_bl31_plat_arch_setup();
}