/*
 * Copyright (c) 2020-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch/aarch64/arch_features.h>
#include <bl31/bl31.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <common/tbbr/tbbr_img_def.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/fconf/fconf.h>
#include <lib/fconf/fconf_dyn_cfg_getter.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/el3_spmd_logical_sp.h>
#include <services/ffa_svc.h>
#include <services/spmc_svc.h>
#include <services/spmd_svc.h>
#include <smccc_helpers.h>
#include "spmd_private.h"

/*******************************************************************************
 * SPM Core context information.
 ******************************************************************************/
static spmd_spm_core_context_t spm_core_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * SPM Core attribute information is read from its manifest if the SPMC is not
 * at EL3. Otherwise, it is populated by the SPMC directly.
 ******************************************************************************/
static spmc_manifest_attribute_t spmc_attrs;

/*******************************************************************************
 * FF-A version used by the non-secure endpoint.
 ******************************************************************************/
static uint32_t nonsecure_ffa_version;

/*******************************************************************************
 * SPM Core entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *spmc_ep_info;

/*******************************************************************************
 * Helper to get the SPM Core context on the current CPU.
 ******************************************************************************/
spmd_spm_core_context_t *spmd_get_context(void)
{
	return &spm_core_context[plat_my_core_pos()];
}

/*******************************************************************************
 * SPM Core ID getter.
 ******************************************************************************/
uint16_t spmd_spmc_id_get(void)
{
	return spmc_attrs.spmc_id;
}

/*******************************************************************************
 * Static function declarations.
 ******************************************************************************/
static int32_t spmd_init(void);
static int spmd_spmc_init(void *pm_addr);

static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags);

/******************************************************************************
 * Builds an SPMD to SPMC direct message request.
 *****************************************************************************/
void spmd_build_spmc_message(gp_regs_t *gpregs, uint8_t target_func,
			     unsigned long long message)
{
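	/*
	 * Note on the encoding used below: x0 carries
	 * FFA_MSG_SEND_DIRECT_REQ_SMC32, x1 packs the SPMD endpoint ID
	 * (source) and the SPMC ID (destination), x2 sets the framework
	 * message flag (BIT(31), i.e. FFA_FWK_MSG_BIT) alongside the target
	 * function, and x3 carries the message payload.
	 */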
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_MSG_SEND_DIRECT_REQ_SMC32);
	write_ctx_reg(gpregs, CTX_GPREG_X1,
		(SPMD_DIRECT_MSG_ENDPOINT_ID << FFA_DIRECT_MSG_SOURCE_SHIFT) |
		 spmd_spmc_id_get());
	write_ctx_reg(gpregs, CTX_GPREG_X2, BIT(31) | target_func);
	write_ctx_reg(gpregs, CTX_GPREG_X3, message);

	/* Zero out x4-x7 for the direct request emitted towards the SPMC. */
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);
}

/*******************************************************************************
 * This function takes an SPMC context pointer and performs a synchronous
 * SPMC entry.
 ******************************************************************************/
uint64_t spmd_spm_core_sync_entry(spmd_spm_core_context_t *spmc_ctx)
{
	uint64_t rc;

	assert(spmc_ctx != NULL);

	cm_set_context(&(spmc_ctx->cpu_ctx), SECURE);
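	/*
	 * From this point, cm_get_context(SECURE) on this CPU resolves to the
	 * SPMC context installed above; spmd_spm_core_sync_exit() relies on
	 * this association.
	 */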

	/* Restore the context assigned above */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(SECURE);
#else
	cm_el1_sysregs_context_restore(SECURE);
#endif
	cm_set_next_eret_context(SECURE);

	/* Enter SPMC */
	rc = spmd_spm_core_enter(&spmc_ctx->c_rt_ctx);

	/* Save secure state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(SECURE);
#else
	cm_el1_sysregs_context_save(SECURE);
#endif

	return rc;
}

/*******************************************************************************
 * This function returns to the place where spmd_spm_core_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void spmd_spm_core_sync_exit(uint64_t rc)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();

	/* Get current CPU context from SPMC context */
	assert(cm_get_context(SECURE) == &(ctx->cpu_ctx));

	/*
	 * The SPMD must have initiated the original request through a
	 * synchronous entry into the SPMC. Jump back to the original C
	 * runtime context with the value of rc in x0.
	 */
	spmd_spm_core_exit(ctx->c_rt_ctx, rc);

	panic();
}

/*******************************************************************************
 * Jump to the SPM Core for the first time. Registered with BL31 as the BL32
 * init function: a non-zero return value indicates success to BL31.
 ******************************************************************************/
static int32_t spmd_init(void)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	uint64_t rc;

	VERBOSE("SPM Core init start.\n");

	/* Primary boot core enters the SPMC for initialization. */
	ctx->state = SPMC_STATE_ON_PENDING;

	rc = spmd_spm_core_sync_entry(ctx);
	if (rc != 0ULL) {
		ERROR("SPMC initialisation failed 0x%" PRIx64 "\n", rc);
		return 0;
	}

	ctx->state = SPMC_STATE_ON;

	VERBOSE("SPM Core init end.\n");

	spmd_logical_sp_set_spmc_initialized();
	rc = spmd_logical_sp_init();
	if (rc != 0) {
		WARN("SPMD logical partitions failed to initialise.\n");
	}

	return 1;
}

/*******************************************************************************
 * spmd_secure_interrupt_handler
 * Enter the SPMC for further handling of the secure interrupt by the SPMC
 * itself or a Secure Partition.
 ******************************************************************************/
static uint64_t spmd_secure_interrupt_handler(uint32_t id,
					      uint32_t flags,
					      void *handle,
					      void *cookie)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
	int64_t rc;

	/* Sanity check the security state when the exception was generated */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(NON_SECURE));

	/* Save the non-secure context before entering SPMC */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(NON_SECURE);
#else
	cm_el1_sysregs_context_save(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/*
	 * The hint bit denoting absence of SVE live state is effectively false
	 * in this scenario where execution was trapped to EL3 due to FIQ.
	 */
	simd_ctx_save(NON_SECURE, false);
#endif
#endif

	/* Convey the event to the SPMC through the FFA_INTERRUPT interface. */
	write_ctx_reg(gpregs, CTX_GPREG_X0, FFA_INTERRUPT);
	write_ctx_reg(gpregs, CTX_GPREG_X1, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X2, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X3, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X4, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
	write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

	/* Mark current core as handling a secure interrupt. */
	ctx->secure_interrupt_ongoing = true;
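	/*
	 * This flag is later consumed by the FFA_NORMAL_WORLD_RESUME handler
	 * in spmd_smc_handler(), which only permits resumption while a secure
	 * interrupt is being handled on this core.
	 */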

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(SECURE);
#endif
	rc = spmd_spm_core_sync_entry(ctx);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_save(SECURE, false);
#endif
	if (rc != 0ULL) {
		ERROR("%s failed (%" PRId64 ") on CPU%u\n", __func__, rc, plat_my_core_pos());
	}

	ctx->secure_interrupt_ongoing = false;

#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(NON_SECURE);
#else
	cm_el1_sysregs_context_restore(NON_SECURE);

#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(NON_SECURE);
#endif
#endif
	cm_set_next_eret_context(NON_SECURE);

	SMC_RET0(&ctx->cpu_ctx);
}

#if (EL3_EXCEPTION_HANDLING == 0)
/*******************************************************************************
 * spmd_group0_interrupt_handler_nwd
 * Group0 secure interrupts in the normal world are trapped to EL3. Delegate
 * the handling of the interrupt to the platform handler, and return only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_group0_interrupt_handler_nwd(uint32_t id,
						  uint32_t flags,
						  void *handle,
						  void *cookie)
{
	uint32_t intid;

	/* Sanity check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Sanity check the pointer to this cpu's context. */
	assert(handle == cm_get_context(NON_SECURE));

	assert(id == INTR_ID_UNAVAILABLE);

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

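	/* Acknowledge the pending Group0 interrupt and obtain its ID. */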
	intid = plat_ic_acknowledge_interrupt();

	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	return 0U;
}
#endif

/*******************************************************************************
 * spmd_handle_group0_intr_swd
 * The SPMC delegates handling of Group0 secure interrupts to EL3 firmware
 * using the FFA_EL3_INTR_HANDLE SMC call. The SPMD in turn delegates the
 * handling of the interrupt to the platform handler, and returns only upon
 * successfully handling the Group0 interrupt.
 ******************************************************************************/
static uint64_t spmd_handle_group0_intr_swd(void *handle)
{
	uint32_t intid;

	/* Sanity check the pointer to this cpu's context */
	assert(handle == cm_get_context(SECURE));

	assert(plat_ic_get_pending_interrupt_type() == INTR_TYPE_EL3);

	intid = plat_ic_acknowledge_interrupt();

	/*
	 * TODO: Currently due to a limitation in SPMD implementation, the
	 * platform handler is expected to not delegate handling to NWd while
	 * processing Group0 secure interrupt.
	 */
	if (plat_spmd_handle_group0_interrupt(intid) < 0) {
		/* Group0 interrupt was not handled by the platform. */
		ERROR("Group0 interrupt %u not handled\n", intid);
		panic();
	}

	/* Deactivate the corresponding Group0 interrupt. */
	plat_ic_end_of_interrupt(intid);

	/* Return success. */
	SMC_RET8(handle, FFA_SUCCESS_SMC32, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ);
}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
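/*
 * With RME enabled and the SPMC at S-EL2, the SPMC manifest loaded by BL2
 * resides in Root PAS memory. The helpers below dynamically map the root and
 * secure regions and copy the manifest into Secure PAS memory so the SPMC
 * can access it.
 */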
static int spmd_dynamic_map_mem(uintptr_t base_addr, size_t size,
				 unsigned int attr, uintptr_t *align_addr,
				 size_t *align_size)
{
	uintptr_t base_addr_align;
	size_t mapped_size_align;
	int rc;

	/* Page-align the address and size if necessary */
	base_addr_align = page_align(base_addr, DOWN);
	mapped_size_align = page_align(size, UP);

	if ((base_addr != base_addr_align) &&
	    (size == mapped_size_align)) {
		mapped_size_align += PAGE_SIZE;
	}
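
	/*
	 * Example: base_addr 0x1080 with size 0x1000 aligns down to 0x1000
	 * while the size is already page-aligned; without the extra page the
	 * mapping [0x1000, 0x2000) would miss the tail [0x2000, 0x2080).
	 */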

	/*
	 * Dynamically map the given region using its aligned base address
	 * and size.
	 */
	rc = mmap_add_dynamic_region((unsigned long long)base_addr_align,
				     base_addr_align,
				     mapped_size_align,
				     attr);
	if (rc == 0) {
		*align_addr = base_addr_align;
		*align_size = mapped_size_align;
	}

	return rc;
}

static void spmd_do_sec_cpy(uintptr_t root_base_addr, uintptr_t sec_base_addr,
			    size_t size)
{
	uintptr_t root_base_addr_align, sec_base_addr_align;
	size_t root_mapped_size_align, sec_mapped_size_align;
	int rc;

	assert(root_base_addr != 0UL);
	assert(sec_base_addr != 0UL);
	assert(size != 0UL);

	/* Map the memory with required attributes */
	rc = spmd_dynamic_map_mem(root_base_addr, size, MT_RO_DATA | MT_ROOT,
				  &root_base_addr_align,
				  &root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping", "root region",
		      root_base_addr, rc);
		panic();
	}

	rc = spmd_dynamic_map_mem(sec_base_addr, size, MT_RW_DATA | MT_SECURE,
				  &sec_base_addr_align, &sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while mapping",
		      "secure region", sec_base_addr, rc);
		panic();
	}

	/* Do copy operation */
	(void)memcpy((void *)sec_base_addr, (void *)root_base_addr, size);

	/* Unmap root memory region */
	rc = mmap_remove_dynamic_region(root_base_addr_align,
					root_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "root region", root_base_addr_align, rc);
		panic();
	}

	/* Unmap secure memory region */
	rc = mmap_remove_dynamic_region(sec_base_addr_align,
					sec_mapped_size_align);
	if (rc != 0) {
		ERROR("%s %s %lu (%d)\n", "Error while unmapping",
		      "secure region", sec_base_addr_align, rc);
		panic();
	}
}
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

/*******************************************************************************
 * Loads the SPMC manifest and initialises the SPMC.
 ******************************************************************************/
static int spmd_spmc_init(void *pm_addr)
{
	cpu_context_t *cpu_ctx;
	unsigned int core_id;
	uint32_t ep_attr, flags;
	int rc;
	const struct dyn_cfg_dtb_info_t *image_info __unused;

	/* Load the SPM Core manifest */
	rc = plat_spm_core_manifest_load(&spmc_attrs, pm_addr);
	if (rc != 0) {
		WARN("No or invalid SPM Core manifest image provided by BL2\n");
		return rc;
	}

	/*
	 * Ensure that the SPM Core version is compatible with the SPM
	 * Dispatcher version.
	 */
	if ((spmc_attrs.major_version != FFA_VERSION_MAJOR) ||
	    (spmc_attrs.minor_version > FFA_VERSION_MINOR)) {
		WARN("Unsupported FFA version (%u.%u)\n",
		     spmc_attrs.major_version, spmc_attrs.minor_version);
		return -EINVAL;
	}

	VERBOSE("FFA version (%u.%u)\n", spmc_attrs.major_version,
	     spmc_attrs.minor_version);

	VERBOSE("SPM Core run time EL%x.\n",
	     SPMD_SPM_AT_SEL2 ? MODE_EL2 : MODE_EL1);

	/* Validate the SPMC ID; ensure the high bit is set */
	if (((spmc_attrs.spmc_id >> SPMC_SECURE_ID_SHIFT) &
			SPMC_SECURE_ID_MASK) == 0U) {
		WARN("Invalid ID (0x%x) for SPMC.\n", spmc_attrs.spmc_id);
		return -EINVAL;
	}

	/* Validate the SPM Core execution state */
	if ((spmc_attrs.exec_state != MODE_RW_64) &&
	    (spmc_attrs.exec_state != MODE_RW_32)) {
		WARN("Unsupported %s%x.\n", "SPM Core execution state 0x",
		     spmc_attrs.exec_state);
		return -EINVAL;
	}

	VERBOSE("%s%x.\n", "SPM Core execution state 0x",
		spmc_attrs.exec_state);

#if SPMD_SPM_AT_SEL2
	/* Ensure manifest has not requested AArch32 state in S-EL2 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		WARN("AArch32 state at S-EL2 is not supported.\n");
		return -EINVAL;
	}

	/*
	 * Check that S-EL2 is supported on this system, since S-EL2
	 * is required for SPM.
	 */
	if (!is_feat_sel2_supported()) {
		WARN("SPM Core run time S-EL2 is not supported.\n");
		return -EINVAL;
	}
#endif /* SPMD_SPM_AT_SEL2 */

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0ULL) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(spmc_ep_info, PARAM_EP, VERSION_1, ep_attr);

	/*
	 * Populate SPSR for SPM Core based upon validated parameters from the
	 * manifest.
	 */
	if (spmc_attrs.exec_state == MODE_RW_32) {
		spmc_ep_info->spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
						 SPSR_E_LITTLE,
						 DAIF_FIQ_BIT |
						 DAIF_IRQ_BIT |
						 DAIF_ABT_BIT);
	} else {
#if SPMD_SPM_AT_SEL2
		static const uint32_t runtime_el = MODE_EL2;
#else
		static const uint32_t runtime_el = MODE_EL1;
#endif
		spmc_ep_info->spsr = SPSR_64(runtime_el,
					     MODE_SP_ELX,
					     DISABLE_ALL_EXCEPTIONS);
	}

#if ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31
	image_info = FCONF_GET_PROPERTY(dyn_cfg, dtb, TOS_FW_CONFIG_ID);
	assert(image_info != NULL);

	if ((image_info->config_addr == 0UL) ||
	    (image_info->secondary_config_addr == 0UL) ||
	    (image_info->config_max_size == 0UL)) {
		return -EINVAL;
	}

	/* Copy manifest from root->secure region */
	spmd_do_sec_cpy(image_info->config_addr,
			image_info->secondary_config_addr,
			image_info->config_max_size);

	/* Update ep info of BL32 */
	assert(spmc_ep_info != NULL);
	spmc_ep_info->args.arg0 = image_info->secondary_config_addr;
#endif /* ENABLE_RME && SPMD_SPM_AT_SEL2 && !RESET_TO_BL31 */

	/* Set an initial SPMC context state for all cores. */
	for (core_id = 0U; core_id < PLATFORM_CORE_COUNT; core_id++) {
		spm_core_context[core_id].state = SPMC_STATE_OFF;

		/* Setup an initial cpu context for the SPMC. */
		cpu_ctx = &spm_core_context[core_id].cpu_ctx;
		cm_setup_context(cpu_ctx, spmc_ep_info);

		/*
		 * Pass the core linear ID to the SPMC through x4.
		 * (TF-A implementation defined behavior helping
		 * a legacy TOS migration to adopt FF-A).
		 */
		write_ctx_reg(get_gpregs_ctx(cpu_ctx), CTX_GPREG_X4, core_id);
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmd_pm);

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&spmd_init);

	INFO("SPM Core setup done.\n");

	/*
	 * Register an interrupt handler routing secure interrupts to SPMD
	 * while the NWd is running.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					     spmd_secure_interrupt_handler,
					     flags);
	if (rc != 0) {
		panic();
	}

	/*
	 * Permit configurations where the SPM resides at S-EL1/2. When a
	 * Group0 interrupt triggers while the normal world runs, the
	 * interrupt is routed either through the EHF or directly to the SPMD:
	 *
	 * EL3_EXCEPTION_HANDLING=0: the Group0 interrupt is routed to the SPMD
	 *                   for handling by spmd_group0_interrupt_handler_nwd.
	 *
	 * EL3_EXCEPTION_HANDLING=1: the Group0 interrupt is routed to the EHF.
	 */
#if (EL3_EXCEPTION_HANDLING == 0)
	/*
	 * If EL3 interrupts are supported by the platform, register an
	 * interrupt handler routing Group0 interrupts to SPMD while the NWd is
	 * running.
	 */
	if (plat_ic_has_interrupt_type(INTR_TYPE_EL3)) {
		rc = register_interrupt_type_handler(INTR_TYPE_EL3,
						     spmd_group0_interrupt_handler_nwd,
						     flags);
		if (rc != 0) {
			panic();
		}
	}
#endif

	return 0;
}

/*******************************************************************************
 * Initialize context of SPM Core.
 ******************************************************************************/
int spmd_setup(void)
{
	int rc;
	void *spmc_manifest;

	/*
	 * If the SPMC is at EL3, then just initialise it directly. The
	 * shenanigans of when it is at a lower EL are not needed.
	 */
	if (is_spmc_at_el3()) {
		rc = spmc_setup();
		if (rc != 0) {
			WARN("SPMC initialisation failed 0x%x.\n", rc);
		}

		/*
		 * Allow the SPMC to populate its attributes directly.
		 * spmc_populate_attrs() runs after spmc_setup() so that the
		 * SPMC can report sp_ffa_version once setup has completed.
		 */
		spmc_populate_attrs(&spmc_attrs);

		return 0;
	}

	spmc_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
	if (spmc_ep_info == NULL) {
		WARN("No SPM Core image provided by BL2 boot loader.\n");
		return 0;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(spmc_ep_info->pc != 0ULL);

	/*
	 * Check if BL32 ep_info has a reference to 'tos_fw_config'. This will
	 * be used as a manifest for the SPM Core at the next lower EL/mode.
	 */
	spmc_manifest = (void *)spmc_ep_info->args.arg0;
	if (spmc_manifest == NULL) {
		WARN("Invalid or absent SPM Core manifest.\n");
		return 0;
	}

	/* Load manifest, init SPMC */
	rc = spmd_spmc_init(spmc_manifest);
	if (rc != 0) {
		WARN("Booting device without SPM initialization.\n");
	}

	return 0;
}

/*******************************************************************************
 * Forward FF-A SMCs to the other security state.
 ******************************************************************************/
uint64_t spmd_smc_switch_state(uint32_t smc_fid,
			       bool secure_origin,
			       uint64_t x1,
			       uint64_t x2,
			       uint64_t x3,
			       uint64_t x4,
			       void *handle,
			       uint64_t flags)
{
	unsigned int secure_state_in = (secure_origin) ? SECURE : NON_SECURE;
	unsigned int secure_state_out = (!secure_origin) ? SECURE : NON_SECURE;
	void *ctx_out;
	uint32_t dst_version;
	uint32_t src_version;

#if SPMD_SPM_AT_SEL2
	if ((secure_state_out == SECURE) && (is_sve_hint_set(flags) == true)) {
		/*
		 * Set the SVE hint bit in x0 and pass to the lower secure EL,
		 * if it was set by the caller.
		 */
		smc_fid |= (FUNCID_SVE_HINT_MASK << FUNCID_SVE_HINT_SHIFT);
	}
#endif

	/* Save incoming security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_save(secure_state_in);
#else
	cm_el1_sysregs_context_save(secure_state_in);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	/* Forward the hint bit denoting the absence of SVE live state. */
	simd_ctx_save(secure_state_in, (!secure_origin && (is_sve_hint_set(flags) == true)));
#endif
#endif

	/* Restore outgoing security state */
#if SPMD_SPM_AT_SEL2
	cm_el2_sysregs_context_restore(secure_state_out);
#else
	cm_el1_sysregs_context_restore(secure_state_out);
#if CTX_INCLUDE_FPREGS || CTX_INCLUDE_SVE_REGS
	simd_ctx_restore(secure_state_out);
#endif
#endif
	cm_set_next_eret_context(secure_state_out);

	ctx_out = cm_get_context(secure_state_out);
	if (smc_fid == FFA_NORMAL_WORLD_RESUME) {
		SMC_RET0(ctx_out);
	}

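	/*
	 * Determine the FF-A versions of the source and destination worlds:
	 * the secure side's version comes from the EL3 SPMC (sp_ffa_version)
	 * or from a lower-EL SPMC's manifest, while the normal world's is the
	 * one it negotiated through FFA_VERSION. These decide below whether
	 * x8-x17 are passed through or zeroed.
	 */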
	if (secure_origin && is_spmc_at_el3()) {
		dst_version = nonsecure_ffa_version;
		src_version = spmc_attrs.sp_ffa_version;
	} else if (secure_origin && !is_spmc_at_el3()) {
		dst_version = nonsecure_ffa_version;
		src_version = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
	} else if (!secure_origin && is_spmc_at_el3()) {
		dst_version = spmc_attrs.sp_ffa_version;
		src_version = nonsecure_ffa_version;
	} else { /* (!secure_origin && !is_spmc_at_el3()) */
		dst_version = MAKE_FFA_VERSION(spmc_attrs.major_version,
					       spmc_attrs.minor_version);
		src_version = nonsecure_ffa_version;
	}

	if ((GET_SMC_CC(smc_fid) == SMC_64) && (dst_version >= MAKE_FFA_VERSION(U(1), U(2)))) {
		if (src_version < MAKE_FFA_VERSION(U(1), U(2))) {
			/* FF-A version mismatch with dest >= v1.2: zero outgoing x8-x17. */
			SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
				  SMC_GET_GP(handle, CTX_GPREG_X5),
				  SMC_GET_GP(handle, CTX_GPREG_X6),
				  SMC_GET_GP(handle, CTX_GPREG_X7),
				  0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
		} else {
			/* Both FF-A versions >= v1.2: pass incoming x8-x17 to dest. */
			SMC_RET18(ctx_out, smc_fid, x1, x2, x3, x4,
				  SMC_GET_GP(handle, CTX_GPREG_X5),
				  SMC_GET_GP(handle, CTX_GPREG_X6),
				  SMC_GET_GP(handle, CTX_GPREG_X7),
				  SMC_GET_GP(handle, CTX_GPREG_X8),
				  SMC_GET_GP(handle, CTX_GPREG_X9),
				  SMC_GET_GP(handle, CTX_GPREG_X10),
				  SMC_GET_GP(handle, CTX_GPREG_X11),
				  SMC_GET_GP(handle, CTX_GPREG_X12),
				  SMC_GET_GP(handle, CTX_GPREG_X13),
				  SMC_GET_GP(handle, CTX_GPREG_X14),
				  SMC_GET_GP(handle, CTX_GPREG_X15),
				  SMC_GET_GP(handle, CTX_GPREG_X16),
				  SMC_GET_GP(handle, CTX_GPREG_X17));
		}
	} else {
		/* 32-bit call, or dest has FF-A version < v1.2 or unknown. */
		SMC_RET8(ctx_out, smc_fid, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}
}

/*******************************************************************************
 * Forward SMCs to the other security state.
 ******************************************************************************/
static uint64_t spmd_smc_forward(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	if (is_spmc_at_el3() && !secure_origin) {
		return spmc_smc_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);
	}

	return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2, x3, x4,
				     handle, flags);
}

/*******************************************************************************
 * Return FFA_ERROR with the specified error code.
 ******************************************************************************/
uint64_t spmd_ffa_error_return(void *handle, int error_code)
{
	SMC_RET8(handle, (uint32_t) FFA_ERROR,
		 FFA_TARGET_INFO_MBZ, (uint32_t)error_code,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
}

/*******************************************************************************
 * spmd_check_address_in_binary_image
 ******************************************************************************/
bool spmd_check_address_in_binary_image(uint64_t address)
{
	assert(!check_uptr_overflow(spmc_attrs.load_address, spmc_attrs.binary_size));

	return ((address >= spmc_attrs.load_address) &&
		(address < (spmc_attrs.load_address + spmc_attrs.binary_size)));
}

/******************************************************************************
 * spmd_is_spmc_message
 *****************************************************************************/
static bool spmd_is_spmc_message(unsigned int ep)
{
	if (is_spmc_at_el3()) {
		return false;
	}

	return ((ffa_endpoint_destination(ep) == SPMD_DIRECT_MSG_ENDPOINT_ID)
		&& (ffa_endpoint_source(ep) == spmc_attrs.spmc_id));
}

/*******************************************************************************
 * This function forwards FF-A SMCs to either the main SPMD handler or the
 * SPMC at EL3, depending on the origin security state, if enabled.
 ******************************************************************************/
uint64_t spmd_ffa_smc_handler(uint32_t smc_fid,
			      uint64_t x1,
			      uint64_t x2,
			      uint64_t x3,
			      uint64_t x4,
			      void *cookie,
			      void *handle,
			      uint64_t flags)
{
	if (is_spmc_at_el3()) {
		/*
		 * If we have an SPMC at EL3 allow handling of the SMC first.
		 * The SPMC will call back through to the SPMD handler if
		 * required.
		 */
		if (is_caller_secure(flags)) {
			return spmc_smc_handler(smc_fid,
						is_caller_secure(flags),
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
	}
	return spmd_smc_handler(smc_fid, x1, x2, x3, x4, cookie,
				handle, flags);
}

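/*******************************************************************************
 * Return the highest FF-A version supported by both the secure and non-secure
 * endpoints, i.e. the lower of the two negotiated versions.
 ******************************************************************************/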
static uint32_t get_common_ffa_version(void)
{
	uint32_t secure_ffa_version;

	if (is_spmc_at_el3()) {
		secure_ffa_version = spmc_attrs.sp_ffa_version;
	} else {
		secure_ffa_version = MAKE_FFA_VERSION(spmc_attrs.major_version,
						      spmc_attrs.minor_version);
	}

	if (secure_ffa_version <= nonsecure_ffa_version) {
		return secure_ffa_version;
	} else {
		return nonsecure_ffa_version;
	}
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for FF-A. Each call is
 * either forwarded to the other security state or handled by the SPM
 * dispatcher.
 ******************************************************************************/
uint64_t spmd_smc_handler(uint32_t smc_fid,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	spmd_spm_core_context_t *ctx = spmd_get_context();
	bool secure_origin;
	int ret;
	uint32_t input_version;

	/* Determine which security state this SMC originated from */
	secure_origin = is_caller_secure(flags);

	VERBOSE("SPM(%u): 0x%x 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64
		" 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n",
		    plat_my_core_pos(), smc_fid, x1, x2, x3, x4,
		    SMC_GET_GP(handle, CTX_GPREG_X5),
		    SMC_GET_GP(handle, CTX_GPREG_X6),
		    SMC_GET_GP(handle, CTX_GPREG_X7));

	/*
	 * If an FFA_PARTITION_INFO_GET_REGS request from an EL3 SPMD logical
	 * partition is in progress, return unconditionally: no other FF-A ABI
	 * is expected between calls to FFA_PARTITION_INFO_GET_REGS.
	 */
	if (is_spmd_logical_sp_info_regs_req_in_progress(ctx)) {
		assert(secure_origin);
		spmd_spm_core_sync_exit(0ULL);
	}

	switch (smc_fid) {
	case FFA_ERROR:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU. If so, then indicate that the SPM Core initialised
		 * unsuccessfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(x2);
		}

		/*
		 * If an SPMD logical partition direct request is ongoing,
		 * return to the SPMD logical partition so the error can be
		 * consumed.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			spmd_spm_core_sync_exit(0ULL);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_VERSION:
		input_version = (uint32_t)(0xFFFFFFFF & x1);
		/*
		 * If the caller is secure and the SPMC was initialized,
		 * return the FFA_VERSION of the SPMD.
		 * If the caller is non-secure and the SPMC was initialized,
		 * forward to the EL3 SPMC if enabled, otherwise return the
		 * version of the SPMC implemented at a lower EL.
		 * Sanity check "input_version".
		 * If the EL3 SPMC is enabled, ignore the SPMC state as it is
		 * not used.
		 */
		if ((input_version & FFA_VERSION_BIT31_MASK) ||
		    (!is_spmc_at_el3() && (ctx->state == SPMC_STATE_RESET))) {
			ret = FFA_ERROR_NOT_SUPPORTED;
		} else if (!secure_origin) {
			nonsecure_ffa_version = input_version;

			if (is_spmc_at_el3()) {
				/*
				 * Forward the call directly to the EL3 SPMC, if
				 * enabled, as we don't need to wrap the call in
				 * a direct request.
				 */
				return spmd_smc_forward(smc_fid, secure_origin,
							x1, x2, x3, x4, cookie,
							handle, flags);
			}

			gp_regs_t *gpregs = get_gpregs_ctx(&ctx->cpu_ctx);
			uint64_t rc;

			if (spmc_attrs.major_version == 1 &&
			    spmc_attrs.minor_version == 0) {
				ret = MAKE_FFA_VERSION(spmc_attrs.major_version,
						       spmc_attrs.minor_version);
				SMC_RET8(handle, (uint32_t)ret,
					 FFA_TARGET_INFO_MBZ,
					 FFA_TARGET_INFO_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					 FFA_PARAM_MBZ);
				break;
			}
			/* Save non-secure system registers context */
#if SPMD_SPM_AT_SEL2
			cm_el2_sysregs_context_save(NON_SECURE);
#else
			cm_el1_sysregs_context_save(NON_SECURE);
#endif

			/*
			 * The incoming request has FFA_VERSION as X0 smc_fid
			 * and requested version in x1. Prepare a direct request
			 * from SPMD to SPMC with FFA_VERSION framework function
			 * identifier in X2 and requested version in X3.
			 */
			spmd_build_spmc_message(gpregs,
						SPMD_FWK_MSG_FFA_VERSION_REQ,
						input_version);

			/*
			 * Ensure x8-x17 NS GP register values are untouched when returning
			 * from the SPMC.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X8, SMC_GET_GP(handle, CTX_GPREG_X8));
			write_ctx_reg(gpregs, CTX_GPREG_X9, SMC_GET_GP(handle, CTX_GPREG_X9));
			write_ctx_reg(gpregs, CTX_GPREG_X10, SMC_GET_GP(handle, CTX_GPREG_X10));
			write_ctx_reg(gpregs, CTX_GPREG_X11, SMC_GET_GP(handle, CTX_GPREG_X11));
			write_ctx_reg(gpregs, CTX_GPREG_X12, SMC_GET_GP(handle, CTX_GPREG_X12));
			write_ctx_reg(gpregs, CTX_GPREG_X13, SMC_GET_GP(handle, CTX_GPREG_X13));
			write_ctx_reg(gpregs, CTX_GPREG_X14, SMC_GET_GP(handle, CTX_GPREG_X14));
			write_ctx_reg(gpregs, CTX_GPREG_X15, SMC_GET_GP(handle, CTX_GPREG_X15));
			write_ctx_reg(gpregs, CTX_GPREG_X16, SMC_GET_GP(handle, CTX_GPREG_X16));
			write_ctx_reg(gpregs, CTX_GPREG_X17, SMC_GET_GP(handle, CTX_GPREG_X17));

			rc = spmd_spm_core_sync_entry(ctx);

			if ((rc != 0ULL) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X0) !=
				FFA_MSG_SEND_DIRECT_RESP_SMC32) ||
			    (SMC_GET_GP(gpregs, CTX_GPREG_X2) !=
				(FFA_FWK_MSG_BIT |
				 SPMD_FWK_MSG_FFA_VERSION_RESP))) {
				ERROR("Failed to forward FFA_VERSION\n");
				ret = FFA_ERROR_NOT_SUPPORTED;
			} else {
				ret = SMC_GET_GP(gpregs, CTX_GPREG_X3);
			}

			/*
			 * x0-x4 are updated by spmd_smc_forward below.
			 * Zero out x5-x7 in the FFA_VERSION response.
			 */
			write_ctx_reg(gpregs, CTX_GPREG_X5, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X6, 0);
			write_ctx_reg(gpregs, CTX_GPREG_X7, 0);

			/*
			 * Return here after SPMC has handled FFA_VERSION.
			 * The returned SPMC version is held in X3.
			 * Forward this version in X0 to the non-secure caller.
			 */
			return spmd_smc_forward(ret, true, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, FFA_PARAM_MBZ,
						FFA_PARAM_MBZ, cookie, gpregs,
						flags);
		} else {
			ret = MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					       FFA_VERSION_MINOR);
		}

		SMC_RET8(handle, (uint32_t)ret, FFA_TARGET_INFO_MBZ,
			 FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ);
		break; /* not reached */

	case FFA_FEATURES:
		/*
		 * This is an optional interface. Do the minimal checks and
		 * forward to SPM Core which will handle it if implemented.
		 */

		/* Forward SMC from Normal world to the SPM Core */
		if (!secure_origin) {
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}

		/*
		 * Return success if the call was from the secure world, i.e.
		 * all FF-A functions are supported. This is essentially a
		 * nop.
		 */
		SMC_RET8(handle, FFA_SUCCESS_SMC32, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));

		break; /* not reached */

	case FFA_ID_GET:
		/*
		 * Returns the ID of the calling FF-A component.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, FFA_NS_ENDPOINT_ID,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}

		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		if (secure_origin) {
			ret = spmd_pm_secondary_ep_register(x1);

			if (ret < 0) {
				SMC_RET8(handle, FFA_ERROR_SMC64,
					FFA_TARGET_INFO_MBZ, ret,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			} else {
				SMC_RET8(handle, FFA_SUCCESS_SMC64,
					FFA_TARGET_INFO_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					FFA_PARAM_MBZ);
			}
		}

		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* Not reached */

	case FFA_SPM_ID_GET:
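		/* FFA_SPM_ID_GET is only defined from FF-A v1.1 onwards. */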
		if (MAKE_FFA_VERSION(1, 1) > FFA_VERSION_COMPILED) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		/*
		 * Returns the ID of the SPMC or SPMD depending on the FF-A
		 * instance where this function is invoked.
		 */
		if (!secure_origin) {
			SMC_RET8(handle, FFA_SUCCESS_SMC32,
				 FFA_TARGET_INFO_MBZ, spmc_attrs.spmc_id,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				 FFA_PARAM_MBZ);
		}
		SMC_RET8(handle, FFA_SUCCESS_SMC32,
			 FFA_TARGET_INFO_MBZ, SPMD_DIRECT_MSG_ENDPOINT_ID,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ, FFA_PARAM_MBZ,
			 FFA_PARAM_MBZ);

		break; /* not reached */

	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		if (get_common_ffa_version() < MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
		/*
		 * Regardless of secure_origin, SPMD logical partitions cannot
		 * handle direct messages. They can only initiate direct
		 * messages and consume direct responses or errors.
		 */
		if (is_spmd_lp_id(ffa_endpoint_source(x1)) ||
				  is_spmd_lp_id(ffa_endpoint_destination(x1))) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_INVALID_PARAMETER);
		}

		/*
		 * When there is an ongoing SPMD logical partition direct
		 * request, there cannot be another direct request. Return
		 * error in this case. Panic'ing is an option but that does
		 * not provide the opportunity for caller to abort based on
		 * error codes.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_DENIED);
		}

		if (!secure_origin) {
			/* Validate source endpoint is non-secure for non-secure caller. */
			if (ffa_is_secure_world_id(ffa_endpoint_source(x1))) {
				return spmd_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
			}
		}
		if (secure_origin && spmd_is_spmc_message(x1)) {
			return spmd_ffa_error_return(handle,
					FFA_ERROR_DENIED);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */

	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		if (get_common_ffa_version() < MAKE_FFA_VERSION(U(1), U(2))) {
			/* Call not supported at this version */
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
		/* fallthrough */
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
		if (secure_origin && (spmd_is_spmc_message(x1) ||
		    is_spmd_logical_sp_dir_req_in_progress(ctx))) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			/* Forward direct message to the other world */
			return spmd_smc_forward(smc_fid, secure_origin,
						x1, x2, x3, x4, cookie,
						handle, flags);
		}
		break; /* Not reached */
	case FFA_RX_RELEASE:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_PARTITION_INFO_GET:
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
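	/*
	 * The notification, indirect messaging (FFA_MSG_SEND2) and RX acquire
	 * ABIs below were introduced in FF-A v1.1, hence the version gate.
	 */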
	case FFA_NOTIFICATION_BITMAP_CREATE:
	case FFA_NOTIFICATION_BITMAP_DESTROY:
	case FFA_NOTIFICATION_BIND:
	case FFA_NOTIFICATION_UNBIND:
	case FFA_NOTIFICATION_SET:
	case FFA_NOTIFICATION_GET:
	case FFA_NOTIFICATION_INFO_GET:
	case FFA_NOTIFICATION_INFO_GET_SMC64:
	case FFA_MSG_SEND2:
	case FFA_RX_ACQUIRE:
#endif
	case FFA_MSG_RUN:
		/*
		 * The above calls should be invoked only by the Normal world
		 * and must not be forwarded from the Secure world to the
		 * Normal world.
		 */
		if (secure_origin) {
			return spmd_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_MSG_SEND:
	case FFA_MEM_DONATE_SMC32:
	case FFA_MEM_DONATE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_TX:
	case FFA_MEM_FRAG_RX:
	case FFA_SUCCESS_SMC32:
	case FFA_SUCCESS_SMC64:
		/*
		 * If there is an ongoing direct request from an SPMD logical
		 * partition, return an error.
		 */
		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
					FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_MSG_WAIT:
		/*
		 * Check if this is the first invocation of this interface on
		 * this CPU from the Secure world. If so, then indicate that the
		 * SPM Core initialised successfully.
		 */
		if (secure_origin && (ctx->state == SPMC_STATE_ON_PENDING)) {
			spmd_spm_core_sync_exit(0ULL);
		}

		/* Forward the call to the other world */
		/* fallthrough */
	case FFA_INTERRUPT:
	case FFA_MSG_YIELD:
		/* This interface must be invoked only by the Secure world */
		if (!secure_origin) {
			return spmd_ffa_error_return(handle,
						      FFA_ERROR_NOT_SUPPORTED);
		}

		if (is_spmd_logical_sp_dir_req_in_progress(ctx)) {
			assert(secure_origin);
			return spmd_ffa_error_return(handle,
					FFA_ERROR_DENIED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* not reached */

	case FFA_NORMAL_WORLD_RESUME:
		if (secure_origin && ctx->secure_interrupt_ongoing) {
			spmd_spm_core_sync_exit(0ULL);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_DENIED);
		}
		break; /* Not reached */
#if MAKE_FFA_VERSION(1, 1) <= FFA_VERSION_COMPILED
	case FFA_PARTITION_INFO_GET_REGS_SMC64:
		if (secure_origin) {
			return spmd_el3_populate_logical_partition_info(handle, x1,
								   x2, x3);
		}

		/* Call only supported with SMCCC 1.2+ */
		if (MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION) < 0x10002) {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}

		return spmd_smc_forward(smc_fid, secure_origin,
					x1, x2, x3, x4, cookie,
					handle, flags);
		break; /* Not reached */
#endif
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		/* This interface must not be forwarded to other worlds. */
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		break; /* not reached */

	case FFA_EL3_INTR_HANDLE:
		if (secure_origin) {
			return spmd_handle_group0_intr_swd(handle);
		} else {
			return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
		}
	default:
		WARN("SPM: Unsupported call 0x%08x\n", smc_fid);
		return spmd_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}
}
1364