/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <libfdt.h>

#include <platform_def.h>

#include <arch_features.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <common/desc_image_load.h>
#include <common/fdt_fixup.h>
#include <common/fdt_wrappers.h>
#include <lib/optee_utils.h>
#include <lib/transfer_list.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#if ENABLE_RME
#include <qemu_pas_def.h>
#endif

#include "qemu_private.h"

#define MAP_BL2_TOTAL		MAP_REGION_FLAT(			\
					bl2_tzram_layout.total_base,	\
					bl2_tzram_layout.total_size,	\
					MT_MEMORY | MT_RW | EL3_PAS)

#define MAP_BL2_RO		MAP_REGION_FLAT(			\
					BL_CODE_BASE,			\
					BL_CODE_END - BL_CODE_BASE,	\
					MT_CODE | EL3_PAS),		\
				MAP_REGION_FLAT(			\
					BL_RO_DATA_BASE,		\
					BL_RO_DATA_END			\
						- BL_RO_DATA_BASE,	\
					MT_RO_DATA | EL3_PAS)

#if USE_COHERENT_MEM
#define MAP_BL_COHERENT_RAM	MAP_REGION_FLAT(			\
					BL_COHERENT_RAM_BASE,		\
					BL_COHERENT_RAM_END		\
						- BL_COHERENT_RAM_BASE,	\
					MT_DEVICE | MT_RW | EL3_PAS)
#endif

/* Data structure which holds the extents of the trusted SRAM for BL2 */
static meminfo_t bl2_tzram_layout __aligned(CACHE_WRITEBACK_GRANULE);
static struct transfer_list_header *bl2_tl;

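/*
 * BL1 hands over the extents of the trusted SRAM available to BL2 in arg1
 * (a pointer to a meminfo_t); the remaining arguments are not used here.
 */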
void bl2_early_platform_setup2(u_register_t arg0, u_register_t arg1,
			       u_register_t arg2, u_register_t arg3)
{
	meminfo_t *mem_layout = (void *)arg1;

	/* Initialize the console to provide early debug support */
	qemu_console_init();

	/* Setup the BL2 memory layout */
	bl2_tzram_layout = *mem_layout;

	plat_qemu_io_setup();
}

static void security_setup(void)
{
	/*
	 * This is where a TrustZone address space controller and other
	 * security related peripherals would be configured.
	 */
}

#if defined(SPD_trusty) || defined(SPD_spmd)

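/*
 * Interrupt type cell values used in the generated interrupt templates;
 * they match the GIC_SPI/GIC_PPI constants from the Linux GIC device tree
 * binding (dt-bindings/interrupt-controller/arm-gic.h).
 */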
#define GIC_SPI 0
#define GIC_PPI 1

static int spd_add_dt_node(void *fdt)
{
	int offs, trusty_offs, root_offs;
	int gic, ipi;
	int len;
	const uint32_t *prop;

	if (fdt_path_offset(fdt, "/trusty") >= 0) {
		WARN("Trusty Device Tree node already exists!\n");
		return 0;
	}

	offs = fdt_node_offset_by_compatible(fdt, -1, "arm,cortex-a15-gic");
	if (offs < 0)
		offs = fdt_node_offset_by_compatible(fdt, -1, "arm,gic-v3");

	if (offs < 0)
		return -1;
	gic = fdt_get_phandle(fdt, offs);
	if (!gic) {
		WARN("Failed to get gic phandle\n");
		return -1;
	}
	INFO("Found gic phandle 0x%x\n", gic);

	offs = fdt_path_offset(fdt, "/");
	if (offs < 0)
		return -1;
	root_offs = offs;

	/* CustomIPI node for pre 5.10 linux driver */
	offs = fdt_add_subnode(fdt, offs, "interrupt-controller");
	if (offs < 0)
		return -1;
	ipi = fdt_get_max_phandle(fdt) + 1;
	if (fdt_setprop_u32(fdt, offs, "phandle", ipi))
		return -1;

	ipi = fdt_get_phandle(fdt, offs);
	if (!ipi) {
		WARN("Failed to get ipi phandle\n");
		return -1;
	}
	INFO("Found ipi phandle 0x%x\n", ipi);

	if (fdt_appendprop_string(fdt, offs, "compatible", "android,CustomIPI"))
		return -1;
	if (fdt_setprop_u32(fdt, offs, "#interrupt-cells", 1))
		return -1;
	if (fdt_setprop_u32(fdt, offs, "interrupt-controller", 0))
		return -1;

#ifdef SPD_spmd
	offs = fdt_add_subnode(fdt, root_offs, "trusty-ffa");
	if (offs < 0)
		return -1;
	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-ffa-v1"))
		return -1;

	offs = fdt_add_subnode(fdt, offs, "trusty-core");
	if (offs < 0)
		return -1;
	trusty_offs = offs;

	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-core-v1"))
		return -1;
#else
	offs = fdt_add_subnode(fdt, root_offs, "trusty");
	if (offs < 0)
		return -1;
	trusty_offs = offs;

	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-smc-v1"))
		return -1;
#endif
	if (fdt_setprop_u32(fdt, offs, "ranges", 0))
		return -1;
	if (fdt_setprop_u32(fdt, offs, "#address-cells", 2))
		return -1;
	if (fdt_setprop_u32(fdt, offs, "#size-cells", 2))
		return -1;

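	/*
	 * The "irq" node built below tells Trusty's irq driver how its
	 * interrupt numbers map onto the interrupt controllers above:
	 * "interrupt-templates" lists specifier templates (one for the
	 * CustomIPI node, plus PPI and SPI templates for the GIC), and
	 * "interrupt-ranges" assigns irqs 0-15 to template 0, 16-31 to
	 * template 1 and 32-63 to template 2.  (Summary inferred from the
	 * property values constructed here, not from the Trusty binding
	 * documentation.)
	 */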
	offs = fdt_add_subnode(fdt, trusty_offs, "irq");
	if (offs < 0)
		return -1;
	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-irq-v1"))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", ipi))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 0))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", gic))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 1))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", GIC_PPI))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 4))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", gic))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 1))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", GIC_SPI))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-templates", 4))
		return -1;

	/* CustomIPI range for pre 5.10 linux driver */
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 0))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 15))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 0))
		return -1;

	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 16))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 31))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 1))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 32))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 63))
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "interrupt-ranges", 2))
		return -1;

	if (fdt_appendprop_u32(fdt, offs, "ipi-range", 8)) /* beg */
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "ipi-range", 15)) /* end */
		return -1;
	if (fdt_appendprop_u32(fdt, offs, "ipi-range", 8)) /* ipi_base */
		return -1;

	offs = fdt_add_subnode(fdt, trusty_offs, "log");
	if (offs < 0)
		return -1;
	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-log-v1"))
		return -1;

	offs = fdt_add_subnode(fdt, trusty_offs, "test");
	if (offs < 0)
		return -1;
	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-test-v1"))
		return -1;

	offs = fdt_add_subnode(fdt, trusty_offs, "virtio");
	if (offs < 0)
		return -1;
	if (fdt_appendprop_string(fdt, offs, "compatible", "android,trusty-virtio-v1"))
		return -1;

	offs = fdt_node_offset_by_compatible(fdt, -1, "arm,armv8-timer");
	if (offs < 0)
		offs = fdt_node_offset_by_compatible(fdt, -1, "arm,armv7-timer");
	if (offs < 0)
		return -1;

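	/*
	 * Rewrite the start of the timer node's "interrupts" property in
	 * place: a chunk of len / 4 bytes taken from the middle of the
	 * property is copied over its beginning (offset 0).
	 */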
	prop = fdt_getprop(fdt, offs, "interrupts", &len);
	if (prop == NULL)
		return -1;
	if (fdt_setprop_inplace_namelen_partial(fdt, offs, "interrupts",
						strlen("interrupts"), 0,
						prop + len / 4 / 2, len / 4))
		return -1;

	return 0;
}

#else

static int spd_add_dt_node(void *fdt)
{
	return 0;
}

#endif

static int qemu_dt_fixup_securemem(void *fdt)
{
	/*
	 * QEMU adds a device tree node for secure memory. Linux does not
	 * ignore it and will crash when it allocates memory out of this
	 * secure memory region. We currently don't use this node for
	 * anything, so remove it.
	 */

	int offs;
	const char *prop;
	const char memory_device_type[] = "memory";

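	/*
	 * The node removed below is QEMU's secure RAM node, which looks
	 * roughly like the following (illustrative only; the node name and
	 * reg values are assumptions, the matched properties are not):
	 *
	 *	secram@e000000 {
	 *		device_type = "memory";
	 *		status = "disabled";
	 *		secure-status = "okay";
	 *		reg = <0x0 0x0e000000 0x0 0x01000000>;
	 *	};
	 */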
	offs = -1;
	while (true) {
		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
						     memory_device_type,
						     sizeof(memory_device_type));
		if (offs < 0)
			break;

		prop = fdt_getprop(fdt, offs, "status", NULL);
		if (prop == NULL)
			continue;
		if (strcmp(prop, "disabled") != 0)
			continue;
		prop = fdt_getprop(fdt, offs, "secure-status", NULL);
		if (prop == NULL)
			continue;
		if (strcmp(prop, "okay") != 0)
			continue;

		if (fdt_del_node(fdt, offs)) {
			return -1;
		}
		INFO("Removed secure memory node\n");
	}

	return 0;
}

static void update_dt(void)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;
#endif
	int ret;
	void *fdt = (void *)(uintptr_t)ARM_PRELOADED_DTB_BASE;

	ret = fdt_open_into(fdt, fdt, PLAT_QEMU_DT_MAX_SIZE);
	if (ret < 0) {
		ERROR("Invalid Device Tree at %p: error %d\n", fdt, ret);
		return;
	}

	if (qemu_dt_fixup_securemem(fdt)) {
		ERROR("Failed to fixup secure-mem Device Tree node\n");
		return;
	}

	if (dt_add_psci_node(fdt)) {
		ERROR("Failed to add PSCI Device Tree node\n");
		return;
	}

	if (dt_add_psci_cpu_enable_methods(fdt)) {
		ERROR("Failed to add PSCI cpu enable methods in Device Tree\n");
		return;
	}

	if (spd_add_dt_node(fdt)) {
		ERROR("Failed to add SPD Device Tree node\n");
		return;
	}

#if ENABLE_RME
	if (fdt_add_reserved_memory(fdt, "rmm", REALM_DRAM_BASE,
				    REALM_DRAM_SIZE)) {
		ERROR("Failed to reserve RMM memory in Device Tree\n");
		return;
	}

	INFO("Reserved RMM memory [0x%lx, 0x%lx] in Device tree\n",
	     (uintptr_t)REALM_DRAM_BASE,
	     (uintptr_t)REALM_DRAM_BASE + REALM_DRAM_SIZE - 1);
#endif

	ret = fdt_pack(fdt);
	if (ret < 0)
		ERROR("Failed to pack Device Tree at %p: error %d\n", fdt, ret);

#if TRANSFER_LIST
	/* create a TE */
	te = transfer_list_add(bl2_tl, TL_TAG_FDT, fdt_totalsize(fdt), fdt);
	if (!te) {
		ERROR("Failed to add FDT entry to Transfer List\n");
		return;
	}
#endif
}

void bl2_platform_setup(void)
{
#if TRANSFER_LIST
	bl2_tl = transfer_list_init((void *)(uintptr_t)FW_HANDOFF_BASE,
				    FW_HANDOFF_SIZE);
	if (!bl2_tl) {
		ERROR("Failed to initialize Transfer List at 0x%lx\n",
		      (unsigned long)FW_HANDOFF_BASE);
	}
#endif
	security_setup();
	update_dt();

	/* TODO Initialize timer */
}

void qemu_bl2_sync_transfer_list(void)
{
#if TRANSFER_LIST
	transfer_list_update_checksum(bl2_tl);
#endif
}

#if ENABLE_RME
static void bl2_plat_gpt_setup(void)
{
	/*
	 * The GPT library might modify the gpt regions structure to optimize
	 * the layout, so the array cannot be constant.
	 */
	pas_region_t pas_regions[] = {
		QEMU_PAS_ROOT,
		QEMU_PAS_SECURE,
		QEMU_PAS_GPTS,
		QEMU_PAS_NS0,
		QEMU_PAS_REALM,
		QEMU_PAS_NS1,
	};

	/*
	 * Initialize the entire protected space to GPT_GPI_ANY. With each L0
	 * entry covering 1GB (currently the only supported option), covering
	 * 256TB of RAM (48-bit PA) would require a 2MB L0 region. At the
	 * moment we use an 8KB table, which covers 1TB of RAM (40-bit PA).
	 */
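	/*
	 * (Sizing sketch, assuming 8-byte L0 descriptors: 1TB / 1GB per
	 * entry = 1024 entries * 8 bytes = 8KB; 256TB would need 256K
	 * entries = 2MB.)
	 */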
	if (gpt_init_l0_tables(GPCCR_PPS_1TB, PLAT_QEMU_L0_GPT_BASE,
			       PLAT_QEMU_L0_GPT_SIZE +
			       PLAT_QEMU_GPT_BITLOCK_SIZE) < 0) {
		ERROR("gpt_init_l0_tables() failed!\n");
		panic();
	}

	/* Carve out defined PAS ranges. */
	if (gpt_init_pas_l1_tables(GPCCR_PGS_4K,
				   PLAT_QEMU_L1_GPT_BASE,
				   PLAT_QEMU_L1_GPT_SIZE,
				   pas_regions,
				   (unsigned int)(sizeof(pas_regions) /
						  sizeof(pas_region_t))) < 0) {
		ERROR("gpt_init_pas_l1_tables() failed!\n");
		panic();
	}

	INFO("Enabling Granule Protection Checks\n");
	if (gpt_enable() < 0) {
		ERROR("gpt_enable() failed!\n");
		panic();
	}
}
#endif

void bl2_plat_arch_setup(void)
{
	const mmap_region_t bl_regions[] = {
		MAP_BL2_TOTAL,
		MAP_BL2_RO,
#if USE_COHERENT_MEM
		MAP_BL_COHERENT_RAM,
#endif
#if ENABLE_RME
		MAP_RMM_DRAM,
		MAP_GPT_L0_REGION,
		MAP_GPT_L1_REGION,
#endif
		{0}
	};

	setup_page_tables(bl_regions, plat_qemu_get_mmap());

#if ENABLE_RME
	/* BL2 runs in EL3 when RME enabled. */
	assert(is_feat_rme_present());
	enable_mmu_el3(0);

	/* Initialise and enable granule protection after MMU. */
	bl2_plat_gpt_setup();
#else /* ENABLE_RME */

#ifdef __aarch64__
	enable_mmu_el1(0);
#else
	enable_mmu_svc_mon(0);
#endif
#endif /* ENABLE_RME */
}

/*******************************************************************************
 * Gets SPSR for BL32 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef __aarch64__
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL3-2 image.
	 */
	return 0;
#else
	return SPSR_MODE32(MODE32_svc, SPSR_T_ARM, SPSR_E_LITTLE,
			   DISABLE_ALL_EXCEPTIONS);
#endif
}

/*******************************************************************************
 * Gets SPSR for BL33 entry
 ******************************************************************************/
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
	uint32_t spsr;
#ifdef __aarch64__
	unsigned int mode;

	/* Figure out what mode we enter the non-secure world in */
	mode = (el_implemented(2) != EL_IMPL_NONE) ? MODE_EL2 : MODE_EL1;

	/*
	 * TODO: Consider the possibility of specifying the SPSR in
	 * the FIP ToC and allowing the platform to have a say as
	 * well.
	 */
	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
#else
	spsr = SPSR_MODE32(MODE32_svc,
			   plat_get_ns_image_entrypoint() & 0x1,
			   SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
#endif
	return spsr;
}

#if defined(SPD_spmd) && SPMD_SPM_AT_SEL2
static int load_sps_from_tb_fw_config(struct image_info *image_info)
{
	void *dtb = (void *)image_info->image_base;
	const char *compat_str = "arm,sp";
	const struct fdt_property *uuid;
	uint32_t load_addr;
	const char *name;
	int sp_node;
	int node;

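	/*
	 * Expected TB_FW_CONFIG fragment (illustrative sketch; only the
	 * "arm,sp" compatible and the "uuid"/"load-address" properties come
	 * from the code below, the node names and values are made up):
	 *
	 *	secure-partitions {
	 *		compatible = "arm,sp";
	 *		example-sp {
	 *			uuid = [12 34 ...];
	 *			load-address = <0x6000000>;
	 *		};
	 *	};
	 */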
	node = fdt_node_offset_by_compatible(dtb, -1, compat_str);
	if (node < 0) {
		ERROR("Can't find %s in TB_FW_CONFIG\n", compat_str);
		return -1;
	}

	fdt_for_each_subnode(sp_node, dtb, node) {
		name = fdt_get_name(dtb, sp_node, NULL);
		if (name == NULL) {
			ERROR("Can't get name of node in dtb\n");
			return -1;
		}
		uuid = fdt_get_property(dtb, sp_node, "uuid", NULL);
		if (uuid == NULL) {
			ERROR("Can't find property uuid in node %s\n", name);
			return -1;
		}
		if (fdt_read_uint32(dtb, sp_node, "load-address",
				    &load_addr) < 0) {
			ERROR("Can't read load-address in node %s\n", name);
			return -1;
		}
		if (qemu_io_register_sp_pkg(name, uuid->data, load_addr) < 0) {
			return -1;
		}
	}

	return 0;
}
#endif /*defined(SPD_spmd) && SPMD_SPM_AT_SEL2*/

#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
static int handoff_pageable_part(uint64_t pagable_part)
{
#if TRANSFER_LIST
	struct transfer_list_entry *te;

	te = transfer_list_add(bl2_tl, TL_TAG_OPTEE_PAGABLE_PART,
			       sizeof(pagable_part), &pagable_part);
	if (!te) {
		INFO("Cannot add TE for pageable part\n");
		return -1;
	}
#endif
	return 0;
}
#endif

static int qemu_bl2_handle_post_image_load(unsigned int image_id)
{
	int err = 0;
	bl_mem_params_node_t *bl_mem_params = get_bl_mem_params_node(image_id);
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
	bl_mem_params_node_t *pager_mem_params = NULL;
	bl_mem_params_node_t *paged_mem_params = NULL;
#endif
#if defined(SPD_spmd)
	bl_mem_params_node_t *bl32_mem_params = NULL;
#endif
#if TRANSFER_LIST
	struct transfer_list_header *ns_tl = NULL;
#endif

	assert(bl_mem_params);

	switch (image_id) {
#if TRANSFER_LIST
	case BL31_IMAGE_ID:
		/*
		 * arg0 is a bl_params_t reserved for bl31_early_platform_setup2.
		 * We just need arg1 and arg3 for BL31 to update the TL from
		 * secure to non-secure memory before it exits.
		 */
#ifdef __aarch64__
		if (GET_RW(bl_mem_params->ep_info.spsr) == MODE_RW_64) {
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_X1_VALUE(REGISTER_CONVENTION_VERSION);
		} else
#endif
		{
			bl_mem_params->ep_info.args.arg1 =
				TRANSFER_LIST_HANDOFF_R1_VALUE(REGISTER_CONVENTION_VERSION);
		}

		bl_mem_params->ep_info.args.arg3 = (uintptr_t)bl2_tl;
		break;
#endif
	case BL32_IMAGE_ID:
#if defined(SPD_opteed) || defined(AARCH32_SP_OPTEE) || defined(SPMC_OPTEE)
		pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);
		assert(pager_mem_params);

		paged_mem_params = get_bl_mem_params_node(BL32_EXTRA2_IMAGE_ID);
		assert(paged_mem_params);

		err = parse_optee_header(&bl_mem_params->ep_info,
					 &pager_mem_params->image_info,
					 &paged_mem_params->image_info);
		if (err != 0) {
			WARN("OPTEE header parse error.\n");
		}

		/* add TL_TAG_OPTEE_PAGABLE_PART entry to the TL */
		if (handoff_pageable_part(bl_mem_params->ep_info.args.arg1)) {
			return -1;
		}
#endif

		INFO("Handoff to BL32\n");
		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl32_entry();
		if (TRANSFER_LIST &&
		    transfer_list_set_handoff_args(bl2_tl,
						   &bl_mem_params->ep_info))
			break;

		INFO("Using default arguments\n");
#if defined(SPMC_OPTEE)
		/*
		 * Explicit zeroes to unused registers since they may have
		 * been populated by parse_optee_header() above.
		 *
		 * OP-TEE expects the system DTB in x2 and TOS_FW_CONFIG in x0;
		 * the latter is filled in below for TOS_FW_CONFIG_ID and
		 * applies to any other SPMC too.
		 */
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
#elif defined(SPD_opteed)
		/*
		 * OP-TEE expects to receive the DTB address in x2.
		 * This will be copied into x2 by the dispatcher.
		 */
		bl_mem_params->ep_info.args.arg3 = ARM_PRELOADED_DTB_BASE;
#elif defined(AARCH32_SP_OPTEE)
		bl_mem_params->ep_info.args.arg0 =
			bl_mem_params->ep_info.args.arg1;
		bl_mem_params->ep_info.args.arg1 = 0;
		bl_mem_params->ep_info.args.arg2 = ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg3 = 0;
#endif
		break;

	case BL33_IMAGE_ID:
#ifdef AARCH32_SP_OPTEE
		/* AArch32 only core: OP-TEE expects NSec EP in register LR */
		pager_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		assert(pager_mem_params);
		pager_mem_params->ep_info.lr_svc = bl_mem_params->ep_info.pc;
#endif

		bl_mem_params->ep_info.spsr = qemu_get_spsr_for_bl33_entry();

#if ARM_LINUX_KERNEL_AS_BL33
		/*
		 * According to the file ``Documentation/arm64/booting.txt`` of
		 * the Linux kernel tree, Linux expects the physical address of
		 * the device tree blob (DTB) in x0, while x1-x3 are reserved
		 * for future use and must be 0.
		 */
		bl_mem_params->ep_info.args.arg0 =
			(u_register_t)ARM_PRELOADED_DTB_BASE;
		bl_mem_params->ep_info.args.arg1 = 0U;
		bl_mem_params->ep_info.args.arg2 = 0U;
		bl_mem_params->ep_info.args.arg3 = 0U;
#elif TRANSFER_LIST
		if (bl2_tl) {
			/* relocate the TL to pre-allocated NS memory */
			ns_tl = transfer_list_relocate(bl2_tl,
					(void *)(uintptr_t)FW_NS_HANDOFF_BASE,
					bl2_tl->max_size);
			if (!ns_tl) {
				ERROR("Relocate TL to 0x%lx failed\n",
				      (unsigned long)FW_NS_HANDOFF_BASE);
				return -1;
			}
		}

		INFO("Handoff to BL33\n");
		if (!transfer_list_set_handoff_args(ns_tl,
						    &bl_mem_params->ep_info)) {
			INFO("Invalid TL, fallback to default arguments\n");
			bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
		}
#else
		/* BL33 expects to receive the primary CPU MPID (through r0) */
		bl_mem_params->ep_info.args.arg0 = 0xffff & read_mpidr();
#endif /* ARM_LINUX_KERNEL_AS_BL33 */

		break;
#ifdef SPD_spmd
#if SPMD_SPM_AT_SEL2
	case TB_FW_CONFIG_ID:
		err = load_sps_from_tb_fw_config(&bl_mem_params->image_info);
		break;
#endif
	case TOS_FW_CONFIG_ID:
		/* An SPMC expects TOS_FW_CONFIG in x0/r0 */
		bl32_mem_params = get_bl_mem_params_node(BL32_IMAGE_ID);
		bl32_mem_params->ep_info.args.arg0 =
			bl_mem_params->image_info.image_base;
		break;
#endif
	default:
		/* Do nothing in default case */
		break;
	}

	return err;
}

/*******************************************************************************
 * This function can be used by the platforms to update/use image
 * information for given `image_id`.
 ******************************************************************************/
int bl2_plat_handle_post_image_load(unsigned int image_id)
{
	return qemu_bl2_handle_post_image_load(image_id);
}

uintptr_t plat_get_ns_image_entrypoint(void)
{
	return NS_IMAGE_OFFSET;
}