1 /*
2 * Copyright (c) 2022-2024, ARM Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8 #include <errno.h>
9 #include <stdio.h>
10
11 #include <arch_helpers.h>
12 #include <bl31/bl31.h>
13 #include <bl31/ehf.h>
14 #include <bl31/interrupt_mgmt.h>
15 #include <common/debug.h>
16 #include <common/fdt_wrappers.h>
17 #include <common/runtime_svc.h>
18 #include <common/uuid.h>
19 #include <lib/el3_runtime/context_mgmt.h>
20 #include <lib/smccc.h>
21 #include <lib/utils.h>
22 #include <lib/xlat_tables/xlat_tables_v2.h>
23 #include <libfdt.h>
24 #include <plat/common/platform.h>
25 #include <services/el3_spmc_logical_sp.h>
26 #include <services/ffa_svc.h>
27 #include <services/spmc_svc.h>
28 #include <services/spmd_svc.h>
29 #include "spmc.h"
30 #include "spmc_shared_mem.h"
31
32 #include <platform_def.h>
33
34 /* FFA_MEM_PERM_* helpers */
35 #define FFA_MEM_PERM_MASK U(7)
36 #define FFA_MEM_PERM_DATA_MASK U(3)
37 #define FFA_MEM_PERM_DATA_SHIFT U(0)
38 #define FFA_MEM_PERM_DATA_NA U(0)
39 #define FFA_MEM_PERM_DATA_RW U(1)
40 #define FFA_MEM_PERM_DATA_RES U(2)
41 #define FFA_MEM_PERM_DATA_RO U(3)
42 #define FFA_MEM_PERM_INST_EXEC (U(0) << 2)
43 #define FFA_MEM_PERM_INST_NON_EXEC (U(1) << 2)
44
45 /* Declare the maximum number of SPs and El3 LPs. */
46 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
47
48 /*
49 * Allocate a secure partition descriptor to describe each SP in the system that
50 * does not reside at EL3.
51 */
52 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
53
54 /*
55 * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
56 * the system that interacts with a SP. It is used to track the Hypervisor
57 * buffer pair, version and ID for now. It could be extended to track VM
58 * properties when the SPMC supports indirect messaging.
59 */
60 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
61
62 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
63 uint32_t flags,
64 void *handle,
65 void *cookie);
66
67 /*
68 * Helper function to obtain the array storing the EL3
69 * Logical Partition descriptors.
70 */
get_el3_lp_array(void)71 struct el3_lp_desc *get_el3_lp_array(void)
72 {
73 return (struct el3_lp_desc *) EL3_LP_DESCS_START;
74 }
75
76 /*
77 * Helper function to obtain the descriptor of the last SP to whom control was
78 * handed to on this physical cpu. Currently, we assume there is only one SP.
79 * TODO: Expand to track multiple partitions when required.
80 */
spmc_get_current_sp_ctx(void)81 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
82 {
83 return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
84 }
85
86 /*
87 * Helper function to obtain the execution context of an SP on the
88 * current physical cpu.
89 */
spmc_get_sp_ec(struct secure_partition_desc * sp)90 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
91 {
92 return &(sp->ec[get_ec_index(sp)]);
93 }
94
95 /* Helper function to get pointer to SP context from its ID. */
spmc_get_sp_ctx(uint16_t id)96 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
97 {
98 /* Check for Secure World Partitions. */
99 for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
100 if (sp_desc[i].sp_id == id) {
101 return &(sp_desc[i]);
102 }
103 }
104 return NULL;
105 }
106
107 /*
108 * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
109 * We assume that the first descriptor is reserved for this entity.
110 */
spmc_get_hyp_ctx(void)111 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
112 {
113 return &(ns_ep_desc[0]);
114 }
115
116 /*
117 * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
118 * or OS kernel in the normal world or the last SP that was run.
119 */
spmc_get_mbox_desc(bool secure_origin)120 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
121 {
122 /* Obtain the RX/TX buffer pair descriptor. */
123 if (secure_origin) {
124 return &(spmc_get_current_sp_ctx()->mailbox);
125 } else {
126 return &(spmc_get_hyp_ctx()->mailbox);
127 }
128 }
129
130 /******************************************************************************
131 * This function returns to the place where spmc_sp_synchronous_entry() was
132 * called originally.
133 ******************************************************************************/
spmc_sp_synchronous_exit(struct sp_exec_ctx * ec,uint64_t rc)134 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
135 {
136 /*
137 * The SPM must have initiated the original request through a
138 * synchronous entry into the secure partition. Jump back to the
139 * original C runtime context with the value of rc in x0;
140 */
141 spm_secure_partition_exit(ec->c_rt_ctx, rc);
142
143 panic();
144 }
145
146 /*******************************************************************************
147 * Return FFA_ERROR with specified error code.
148 ******************************************************************************/
spmc_ffa_error_return(void * handle,int error_code)149 uint64_t spmc_ffa_error_return(void *handle, int error_code)
150 {
151 SMC_RET8(handle, FFA_ERROR,
152 FFA_TARGET_INFO_MBZ, error_code,
153 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
154 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
155 }
156
157 /******************************************************************************
158 * Helper function to validate a secure partition ID to ensure it does not
159 * conflict with any other FF-A component and follows the convention to
160 * indicate it resides within the secure world.
161 ******************************************************************************/
is_ffa_secure_id_valid(uint16_t partition_id)162 bool is_ffa_secure_id_valid(uint16_t partition_id)
163 {
164 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
165
166 /* Ensure the ID is not the invalid partition ID. */
167 if (partition_id == INV_SP_ID) {
168 return false;
169 }
170
171 /* Ensure the ID is not the SPMD ID. */
172 if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
173 return false;
174 }
175
176 /*
177 * Ensure the ID follows the convention to indicate it resides
178 * in the secure world.
179 */
180 if (!ffa_is_secure_world_id(partition_id)) {
181 return false;
182 }
183
184 /* Ensure we don't conflict with the SPMC partition ID. */
185 if (partition_id == FFA_SPMC_ID) {
186 return false;
187 }
188
189 /* Ensure we do not already have an SP context with this ID. */
190 if (spmc_get_sp_ctx(partition_id)) {
191 return false;
192 }
193
194 /* Ensure we don't clash with any Logical SP's. */
195 for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
196 if (el3_lp_descs[i].sp_id == partition_id) {
197 return false;
198 }
199 }
200
201 return true;
202 }
203
204 /*******************************************************************************
205 * This function either forwards the request to the other world or returns
206 * with an ERET depending on the source of the call.
207 * We can assume that the destination is for an entity at a lower exception
208 * level as any messages destined for a logical SP resident in EL3 will have
209 * already been taken care of by the SPMC before entering this function.
210 ******************************************************************************/
spmc_smc_return(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * handle,void * cookie,uint64_t flags,uint16_t dst_id)211 static uint64_t spmc_smc_return(uint32_t smc_fid,
212 bool secure_origin,
213 uint64_t x1,
214 uint64_t x2,
215 uint64_t x3,
216 uint64_t x4,
217 void *handle,
218 void *cookie,
219 uint64_t flags,
220 uint16_t dst_id)
221 {
222 /* If the destination is in the normal world always go via the SPMD. */
223 if (ffa_is_normal_world_id(dst_id)) {
224 return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
225 cookie, handle, flags);
226 }
227 /*
228 * If the caller is secure and we want to return to the secure world,
229 * ERET directly.
230 */
231 else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
232 SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
233 }
234 /* If we originated in the normal world then switch contexts. */
235 else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
236 return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
237 x3, x4, handle, flags);
238 } else {
239 /* Unknown State. */
240 panic();
241 }
242
243 /* Shouldn't be Reached. */
244 return 0;
245 }
246
247 /*******************************************************************************
248 * FF-A ABI Handlers.
249 ******************************************************************************/
250
251 /*******************************************************************************
252 * Helper function to validate arg2 as part of a direct message.
253 ******************************************************************************/
direct_msg_validate_arg2(uint64_t x2)254 static inline bool direct_msg_validate_arg2(uint64_t x2)
255 {
256 /* Check message type. */
257 if (x2 & FFA_FWK_MSG_BIT) {
258 /* We have a framework message, ensure it is a known message. */
259 if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
260 VERBOSE("Invalid message format 0x%lx.\n", x2);
261 return false;
262 }
263 } else {
264 /* We have a partition messages, ensure x2 is not set. */
265 if (x2 != (uint64_t) 0) {
266 VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
267 x2);
268 return false;
269 }
270 }
271 return true;
272 }
273
274 /*******************************************************************************
275 * Helper function to validate the destination ID of a direct response.
276 ******************************************************************************/
direct_msg_validate_dst_id(uint16_t dst_id)277 static bool direct_msg_validate_dst_id(uint16_t dst_id)
278 {
279 struct secure_partition_desc *sp;
280
281 /* Check if we're targeting a normal world partition. */
282 if (ffa_is_normal_world_id(dst_id)) {
283 return true;
284 }
285
286 /* Or directed to the SPMC itself.*/
287 if (dst_id == FFA_SPMC_ID) {
288 return true;
289 }
290
291 /* Otherwise ensure the SP exists. */
292 sp = spmc_get_sp_ctx(dst_id);
293 if (sp != NULL) {
294 return true;
295 }
296
297 return false;
298 }
299
300 /*******************************************************************************
301 * Helper function to validate the response from a Logical Partition.
302 ******************************************************************************/
direct_msg_validate_lp_resp(uint16_t origin_id,uint16_t lp_id,void * handle)303 static bool direct_msg_validate_lp_resp(uint16_t origin_id, uint16_t lp_id,
304 void *handle)
305 {
306 /* Retrieve populated Direct Response Arguments. */
307 uint64_t smc_fid = SMC_GET_GP(handle, CTX_GPREG_X0);
308 uint64_t x1 = SMC_GET_GP(handle, CTX_GPREG_X1);
309 uint64_t x2 = SMC_GET_GP(handle, CTX_GPREG_X2);
310 uint16_t src_id = ffa_endpoint_source(x1);
311 uint16_t dst_id = ffa_endpoint_destination(x1);
312
313 if (src_id != lp_id) {
314 ERROR("Invalid EL3 LP source ID (0x%x).\n", src_id);
315 return false;
316 }
317
318 /*
319 * Check the destination ID is valid and ensure the LP is responding to
320 * the original request.
321 */
322 if ((!direct_msg_validate_dst_id(dst_id)) || (dst_id != origin_id)) {
323 ERROR("Invalid EL3 LP destination ID (0x%x).\n", dst_id);
324 return false;
325 }
326
327 if ((smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) &&
328 !direct_msg_validate_arg2(x2)) {
329 ERROR("Invalid EL3 LP message encoding.\n");
330 return false;
331 }
332 return true;
333 }
334
335 /*******************************************************************************
336 * Helper function to check that partition can receive direct msg or not.
337 ******************************************************************************/
direct_msg_receivable(uint32_t properties,uint16_t dir_req_fnum)338 static bool direct_msg_receivable(uint32_t properties, uint16_t dir_req_fnum)
339 {
340 if ((dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ &&
341 ((properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0U)) ||
342 (dir_req_fnum == FFA_FNUM_MSG_SEND_DIRECT_REQ2 &&
343 ((properties & FFA_PARTITION_DIRECT_REQ2_RECV) == 0U))) {
344 return false;
345 }
346
347 return true;
348 }
349
/*******************************************************************************
 * Handle direct request messages and route to the appropriate destination.
 *
 * The request is first validated (arg2 encoding, legal source ID), then
 * dispatched synchronously to an EL3 Logical Partition if one owns the
 * destination ID, and otherwise forwarded to the target SP provided the SP
 * supports the invoked direct request ABI and its execution context on this
 * core is idle. On any validation failure an FFA_ERROR is returned to the
 * caller.
 ******************************************************************************/
static uint64_t direct_req_smc_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	uint16_t src_id = ffa_endpoint_source(x1);
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct el3_lp_desc *el3_lp_descs;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Map the invoked SMC onto the corresponding direct request ABI. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_REQ2_SMC64) ?
		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/*
	 * Sanity check for DIRECT_REQ:
	 * Check if arg2 has been populated correctly based on message type
	 */
	if ((dir_req_funcid == FFA_FNUM_MSG_SEND_DIRECT_REQ) &&
	    !direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
	}

	/* Validate Sender is either the current SP or from the normal world. */
	if ((secure_origin && src_id != spmc_get_current_sp_ctx()->sp_id) ||
	    (!secure_origin && !ffa_is_normal_world_id(src_id))) {
		ERROR("Invalid direct request source ID (0x%x).\n", src_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	el3_lp_descs = get_el3_lp_array();

	/* Check if the request is destined for a Logical Partition. */
	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
		if (el3_lp_descs[i].sp_id == dst_id) {
			/* The LP must support the invoked request ABI. */
			if (!direct_msg_receivable(el3_lp_descs[i].properties, dir_req_funcid)) {
				return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
			}

			/* LPs are serviced synchronously here at EL3. */
			uint64_t ret = el3_lp_descs[i].direct_req(
						smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);
			/* A malformed LP response is unrecoverable: panic. */
			if (!direct_msg_validate_lp_resp(src_id, dst_id,
							 handle)) {
				panic();
			}

			/* Message checks out. */
			return ret;
		}
	}

	/*
	 * If the request was not targeted to a LSP and from the secure world
	 * then it is invalid since a SP cannot call into the Normal world and
	 * there is no other SP to call into. If there are other SPs in future
	 * then the partition runtime model would need to be validated as well.
	 */
	if (secure_origin) {
		VERBOSE("Direct request not supported to the Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check if the SP ID is valid. */
	sp = spmc_get_sp_ctx(dst_id);
	if (sp == NULL) {
		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* The SP must have opted in to receive this direct request ABI. */
	if (!direct_msg_receivable(sp->properties, dir_req_funcid)) {
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Check that the target execution context is in a waiting state before
	 * forwarding the direct request to it.
	 */
	idx = get_ec_index(sp);
	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
		VERBOSE("SP context on core%u is not waiting (%u).\n",
			idx, sp->ec[idx].rt_model);

		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Everything checks out so forward the request to the SP after updating
	 * its state and runtime model.
	 */
	sp->ec[idx].rt_state = RT_STATE_RUNNING;
	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
	/* Remember who sent the request and via which ABI, so the matching
	 * direct response can be validated against it. */
	sp->ec[idx].dir_req_origin_id = src_id;
	sp->ec[idx].dir_req_funcid = dir_req_funcid;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}
474
/*******************************************************************************
 * Handle direct response messages and route to the appropriate destination.
 *
 * The response must originate from an SP currently servicing a direct request
 * of the matching ABI from the named destination. Once validated, the SP's
 * execution context is returned to the waiting state and the response is
 * forwarded — or a synchronous exit is performed when the SPMC itself was the
 * requester (e.g. a power management response).
 ******************************************************************************/
static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
					bool secure_origin,
					uint64_t x1,
					uint64_t x2,
					uint64_t x3,
					uint64_t x4,
					void *cookie,
					void *handle,
					uint64_t flags)
{
	uint16_t dst_id = ffa_endpoint_destination(x1);
	uint16_t dir_req_funcid;
	struct secure_partition_desc *sp;
	unsigned int idx;

	/* Map the response SMC onto the request ABI it must pair with. */
	dir_req_funcid = (smc_fid != FFA_MSG_SEND_DIRECT_RESP2_SMC64) ?
		FFA_FNUM_MSG_SEND_DIRECT_REQ : FFA_FNUM_MSG_SEND_DIRECT_REQ2;

	/* Check if arg2 has been populated correctly based on message type. */
	if (!direct_msg_validate_arg2(x2)) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the response did not originate from the Normal world. */
	if (!secure_origin) {
		VERBOSE("Direct Response not supported from Normal World.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Check that the response is either targeted to the Normal world or the
	 * SPMC e.g. a PM response.
	 */
	if (!direct_msg_validate_dst_id(dst_id)) {
		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the SP descriptor and update its runtime state. */
	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
	if (sp == NULL) {
		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
			dst_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/* Sanity check state is being tracked correctly in the SPMC. */
	idx = get_ec_index(sp);
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/* Ensure SP execution context was in the right runtime model. */
	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
		VERBOSE("SP context on core%u not handling direct req (%u).\n",
			idx, sp->ec[idx].rt_model);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* The response ABI must match the ABI of the pending request. */
	if (dir_req_funcid != sp->ec[idx].dir_req_funcid) {
		WARN("Unmatched direct req/resp func id. req:%x, resp:%x on core%u.\n",
		     sp->ec[idx].dir_req_funcid, (smc_fid & FUNCID_NUM_MASK), idx);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* The response must target the endpoint that sent the request. */
	if (sp->ec[idx].dir_req_origin_id != dst_id) {
		WARN("Invalid direct resp partition ID 0x%x != 0x%x on core%u.\n",
		     dst_id, sp->ec[idx].dir_req_origin_id, idx);
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Clear the ongoing direct request ID. */
	sp->ec[idx].dir_req_origin_id = INV_SP_ID;

	/* Clear the ongoing direct request message version. */
	sp->ec[idx].dir_req_funcid = 0U;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/*
	 * If the receiver is not the SPMC then forward the response to the
	 * Normal world.
	 */
	if (dst_id == FFA_SPMC_ID) {
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here. */
		panic();
	}

	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, dst_id);
}
591
/*******************************************************************************
 * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
 * cycles.
 *
 * Depending on the runtime model of the invoking execution context this
 * either completes a synchronous initialisation entry, resumes the normal
 * world after a secure interrupt was handled, or simply hands the CPU back to
 * the normal world leaving the SP in the waiting state.
 ******************************************************************************/
static uint64_t msg_wait_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int idx;

	/*
	 * Check that the response did not originate from the Normal world as
	 * only the secure world can call this ABI.
	 */
	if (!secure_origin) {
		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
	 */
	idx = get_ec_index(sp);

	/* Protect the runtime state of a UP S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}

	/*
	 * Ensure SP execution context was in the right runtime model: an SP
	 * servicing a direct request must answer with a direct response, not
	 * FFA_MSG_WAIT.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
	}

	/* Sanity check the state is being tracked correctly in the SPMC. */
	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);

	/*
	 * Perform a synchronous exit if the partition was initialising. The
	 * state is updated after the exit.
	 */
	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
		/* Should not get here */
		panic();
	}

	/* Update the state of the SP execution context. */
	sp->ec[idx].rt_state = RT_STATE_WAITING;

	/* Resume normal world if a secure interrupt was handled. */
	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}

		return spmd_smc_switch_state(FFA_NORMAL_WORLD_RESUME, secure_origin,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
					     handle, flags);
	}

	/* Protect the runtime state of a S-EL0 SP with a lock. */
	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Forward the response to the Normal world. */
	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
			       handle, cookie, flags, FFA_NWD_ID);
}
681
/*******************************************************************************
 * Handle FFA_ERROR invoked by an SP. The only expected use is an SP
 * reporting failure during initialisation, which aborts the boot-time
 * synchronous entry with the error value from x2; any other use is rejected.
 ******************************************************************************/
static uint64_t ffa_error_handler(uint32_t smc_fid,
				  bool secure_origin,
				  uint64_t x1,
				  uint64_t x2,
				  uint64_t x3,
				  uint64_t x4,
				  void *cookie,
				  void *handle,
				  uint64_t flags)
{
	struct secure_partition_desc *sp;
	unsigned int ec_idx;

	/* Only the secure world may report errors through this ABI. */
	if (!secure_origin) {
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/* Locate the SP that invoked FFA_ERROR. */
	sp = spmc_get_current_sp_ctx();
	if (sp == NULL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* And its execution context on this physical CPU. */
	ec_idx = get_ec_index(sp);

	/*
	 * FFA_ERROR is only expected while the SP is initialising; abort the
	 * synchronous entry with the error value carried in x2.
	 */
	if (sp->ec[ec_idx].rt_model == RT_MODEL_INIT) {
		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
		spmc_sp_synchronous_exit(&sp->ec[ec_idx], x2);
		/* Should not get here. */
		panic();
	}

	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}
723
/*******************************************************************************
 * Handle FFA_VERSION: validate the requested version encoding, record the
 * normal world caller's version (or check an SP's against its manifest) and
 * report the version implemented by the SPMC.
 ******************************************************************************/
static uint64_t ffa_version_handler(uint32_t smc_fid,
				    bool secure_origin,
				    uint64_t x1,
				    uint64_t x2,
				    uint64_t x3,
				    uint64_t x4,
				    void *cookie,
				    void *handle,
				    uint64_t flags)
{
	uint32_t requested_version = x1 & FFA_VERSION_MASK;

	/* Bit 31 of the input version must be zero. */
	if ((requested_version & FFA_VERSION_BIT31_MASK) != 0U) {
		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
		/* Execution stops here. */
	}

	if (!secure_origin) {
		/*
		 * A normal world caller's requested version is recorded in
		 * its descriptor for later reference.
		 */
		spmc_get_hyp_ctx()->ffa_version = requested_version;
	} else {
		/*
		 * An SP must report the same version as specified in its
		 * manifest; a mismatch indicates a misbehaving SP.
		 * TODO: Should we abort the SP? For now assert this is not
		 * case.
		 */
		assert(requested_version ==
		       spmc_get_current_sp_ctx()->ffa_version);
	}

	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
					  FFA_VERSION_MINOR));
}
764
765 /*******************************************************************************
766 * Helper function to obtain the FF-A version of the calling partition.
767 ******************************************************************************/
get_partition_ffa_version(bool secure_origin)768 uint32_t get_partition_ffa_version(bool secure_origin)
769 {
770 if (secure_origin) {
771 return spmc_get_current_sp_ctx()->ffa_version;
772 } else {
773 return spmc_get_hyp_ctx()->ffa_version;
774 }
775 }
776
/*******************************************************************************
 * Handle the FFA_RXTX_MAP ABI: map the caller's RX/TX buffer pair into the
 * SPMC's translation regime and record it in the caller's mailbox descriptor.
 *
 * x1 carries the TX buffer address, x2 the RX buffer address and x3 the
 * buffer size in FF-A pages. The buffers must be distinct, non-NULL, sized
 * to a multiple of the translation granule, and not already mapped.
 ******************************************************************************/
static uint64_t rxtx_map_handler(uint32_t smc_fid,
				 bool secure_origin,
				 uint64_t x1,
				 uint64_t x2,
				 uint64_t x3,
				 uint64_t x4,
				 void *cookie,
				 void *handle,
				 uint64_t flags)
{
	int ret;
	uint32_t error_code;
	/* Secure callers' buffers are mapped as secure memory, others as NS. */
	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
	struct mailbox *mbox;
	uintptr_t tx_address = x1;
	uintptr_t rx_address = x2;
	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
	uint32_t buf_size = page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (tx_address == 0 || rx_address == 0) {
		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the specified buffers are not the same. */
	if (tx_address == rx_address) {
		WARN("TX Buffer must not be the same as RX Buffer.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Ensure the buffer size is not 0. */
	if (buf_size == 0U) {
		WARN("Buffer size must not be 0\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/*
	 * Ensure the buffer size is a multiple of the translation granule size
	 * in TF-A.
	 */
	if (buf_size % PAGE_SIZE != 0U) {
		WARN("Buffer size must be aligned to translation granule.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Obtain the RX/TX buffer pair descriptor. */
	mbox = spmc_get_mbox_desc(secure_origin);

	spin_lock(&mbox->lock);

	/* Check if buffers have already been mapped. */
	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
		WARN("RX/TX Buffers already mapped (%p/%p)\n",
		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
		error_code = FFA_ERROR_DENIED;
		goto err;
	}

	/* memmap the TX buffer as read only. */
	ret = mmap_add_dynamic_region(tx_address, /* PA */
				      tx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RO_DATA); /* attrs */
	if (ret != 0) {
		/* Return the correct error code. */
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map TX buffer: %d\n", error_code);
		goto err;
	}

	/* memmap the RX buffer as read write. */
	ret = mmap_add_dynamic_region(rx_address, /* PA */
				      rx_address, /* VA */
				      buf_size, /* size */
				      mem_atts | MT_RW_DATA); /* attrs */

	if (ret != 0) {
		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
						FFA_ERROR_INVALID_PARAMETER;
		WARN("Unable to map RX buffer: %d\n", error_code);
		/* Unmap the TX buffer again so no partial mapping remains. */
		mmap_remove_dynamic_region(tx_address, buf_size);
		goto err;
	}

	/* Record the successful mapping in the caller's mailbox descriptor. */
	mbox->tx_buffer = (void *) tx_address;
	mbox->rx_buffer = (void *) rx_address;
	mbox->rxtx_page_count = page_count;
	spin_unlock(&mbox->lock);

	SMC_RET1(handle, FFA_SUCCESS_SMC32);
	/* Execution stops here. */
err:
	spin_unlock(&mbox->lock);
	return spmc_ffa_error_return(handle, error_code);
}
883
/*******************************************************************************
 * Handle the FFA_RXTX_UNMAP ABI: remove the caller's RX/TX buffer pair from
 * the SPMC's translation regime and clear its mailbox descriptor.
 ******************************************************************************/
static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
				   bool secure_origin,
				   uint64_t x1,
				   uint64_t x2,
				   uint64_t x3,
				   uint64_t x4,
				   void *cookie,
				   void *handle,
				   uint64_t flags)
{
	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;

	/*
	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
	 * ABI on behalf of a VM and reject it if this is the case.
	 */
	if (x1 != 0UL) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	spin_lock(&mbox->lock);

	/* Check if buffers are currently mapped. */
	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
		spin_unlock(&mbox->lock);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Unmap RX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
				       buf_size) != 0) {
		/* Best effort: log the failure and continue tearing down. */
		WARN("Unable to unmap RX buffer!\n");
	}

	mbox->rx_buffer = 0;

	/* Unmap TX Buffer */
	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
				       buf_size) != 0) {
		WARN("Unable to unmap TX buffer!\n");
	}

	/* Clear the mailbox so the pair can be mapped again later. */
	mbox->tx_buffer = 0;
	mbox->rxtx_page_count = 0;

	spin_unlock(&mbox->lock);
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
936
937 /*
938 * Helper function to populate the properties field of a Partition Info Get
939 * descriptor.
940 */
941 static uint32_t
partition_info_get_populate_properties(uint32_t sp_properties,enum sp_execution_state sp_ec_state)942 partition_info_get_populate_properties(uint32_t sp_properties,
943 enum sp_execution_state sp_ec_state)
944 {
945 uint32_t properties = sp_properties;
946 uint32_t ec_state;
947
948 /* Determine the execution state of the SP. */
949 ec_state = sp_ec_state == SP_STATE_AARCH64 ?
950 FFA_PARTITION_INFO_GET_AARCH64_STATE :
951 FFA_PARTITION_INFO_GET_AARCH32_STATE;
952
953 properties |= ec_state << FFA_PARTITION_INFO_GET_EXEC_STATE_SHIFT;
954
955 return properties;
956 }
957
958 /*
959 * Collate the partition information in a v1.1 partition information
960 * descriptor format, this will be converter later if required.
961 */
static int partition_info_get_handler_v1_1(uint32_t *uuid,
					   struct ffa_partition_info_v1_1
					   *partitions,
					   uint32_t max_partitions,
					   uint32_t *partition_count)
{
	uint32_t index;
	struct ffa_partition_info_v1_1 *desc;
	bool null_uuid = is_null_uuid(uuid);
	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();

	/*
	 * Deal with Logical Partitions. A NULL (wildcard) UUID matches every
	 * partition; otherwise only partitions carrying the requested UUID
	 * are reported.
	 */
	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
			/* Found a matching UUID, populate appropriately. */
			if (*partition_count >= max_partitions) {
				return FFA_ERROR_NO_MEMORY;
			}

			desc = &partitions[*partition_count];
			desc->ep_id = el3_lp_descs[index].sp_id;
			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
			/* LSPs must be AArch64. */
			desc->properties =
				partition_info_get_populate_properties(
					el3_lp_descs[index].properties,
					SP_STATE_AARCH64);

			/*
			 * Only fill in the UUID for a wildcard query; a
			 * caller that supplied a specific UUID already
			 * knows it.
			 */
			if (null_uuid) {
				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
			}
			(*partition_count)++;
		}
	}

	/* Deal with physical SP's. */
	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
		int uuid_index;
		uint32_t *sp_uuid;

		/* An SP may expose multiple UUIDs; check each in turn. */
		for (uuid_index = 0;
		     uuid_index < sp_desc[index].num_uuids;
		     uuid_index++) {
			sp_uuid = sp_desc[index].uuid_array[uuid_index].uuid;

			if (null_uuid || uuid_match(uuid, sp_uuid)) {
				/* Found a matching UUID, populate appropriately. */

				if (*partition_count >= max_partitions) {
					return FFA_ERROR_NO_MEMORY;
				}

				desc = &partitions[*partition_count];
				desc->ep_id = sp_desc[index].sp_id;
				/*
				 * Execution context count must match No. cores for
				 * S-EL1 SPs.
				 */
				desc->execution_ctx_count = PLATFORM_CORE_COUNT;
				desc->properties =
					partition_info_get_populate_properties(
						sp_desc[index].properties,
						sp_desc[index].execution_state);

				(*partition_count)++;
				if (null_uuid) {
					copy_uuid(desc->uuid, sp_uuid);
				} else {
					/*
					 * A specific UUID matches at most one
					 * entry per SP; move on to the next
					 * SP once found.
					 */
					break;
				}
			}
		}
	}
	return 0;
}
1038
1039 /*
1040 * Handle the case where that caller only wants the count of partitions
1041 * matching a given UUID and does not want the corresponding descriptors
1042 * populated.
1043 */
partition_info_get_handler_count_only(uint32_t * uuid)1044 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
1045 {
1046 uint32_t index = 0;
1047 uint32_t partition_count = 0;
1048 bool null_uuid = is_null_uuid(uuid);
1049 struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
1050
1051 /* Deal with Logical Partitions. */
1052 for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
1053 if (null_uuid ||
1054 uuid_match(uuid, el3_lp_descs[index].uuid)) {
1055 (partition_count)++;
1056 }
1057 }
1058
1059 /* Deal with physical SP's. */
1060 for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
1061 int uuid_index;
1062
1063 for (uuid_index = 0; uuid_index < sp_desc[index].num_uuids; uuid_index++) {
1064 uint32_t *sp_uuid = sp_desc[index].uuid_array[uuid_index].uuid;
1065
1066 if (null_uuid) {
1067 (partition_count)++;
1068 } else if (uuid_match(uuid, sp_uuid)) {
1069 (partition_count)++;
1070 /* Found a match, go to next SP */
1071 break;
1072 }
1073 }
1074 }
1075 return partition_count;
1076 }
1077
1078 /*
1079 * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
1080 * the corresponding descriptor format from the v1.1 descriptor array.
1081 */
partition_info_populate_v1_0(struct ffa_partition_info_v1_1 * partitions,struct mailbox * mbox,int partition_count)1082 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
1083 *partitions,
1084 struct mailbox *mbox,
1085 int partition_count)
1086 {
1087 uint32_t index;
1088 uint32_t buf_size;
1089 uint32_t descriptor_size;
1090 struct ffa_partition_info_v1_0 *v1_0_partitions =
1091 (struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
1092
1093 buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
1094 descriptor_size = partition_count *
1095 sizeof(struct ffa_partition_info_v1_0);
1096
1097 if (descriptor_size > buf_size) {
1098 return FFA_ERROR_NO_MEMORY;
1099 }
1100
1101 for (index = 0U; index < partition_count; index++) {
1102 v1_0_partitions[index].ep_id = partitions[index].ep_id;
1103 v1_0_partitions[index].execution_ctx_count =
1104 partitions[index].execution_ctx_count;
1105 /* Only report v1.0 properties. */
1106 v1_0_partitions[index].properties =
1107 (partitions[index].properties &
1108 FFA_PARTITION_INFO_GET_PROPERTIES_V1_0_MASK);
1109 }
1110 return 0;
1111 }
1112
1113 /*
1114 * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
1115 * v1.0 implementations.
1116 */
static uint64_t partition_info_get_handler(uint32_t smc_fid,
					   bool secure_origin,
					   uint64_t x1,
					   uint64_t x2,
					   uint64_t x3,
					   uint64_t x4,
					   void *cookie,
					   void *handle,
					   uint64_t flags)
{
	int ret;
	uint32_t partition_count = 0;
	uint32_t size = 0;
	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
	struct mailbox *mbox;
	uint64_t info_get_flags;
	bool count_only;
	uint32_t uuid[4];

	/* The target UUID arrives in x1-x4; each register is truncated to 32 bits. */
	uuid[0] = x1;
	uuid[1] = x2;
	uuid[2] = x3;
	uuid[3] = x4;

	/* Determine if the Partition descriptors should be populated. */
	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);

	/* Handle the case where we don't need to populate the descriptors. */
	if (count_only) {
		partition_count = partition_info_get_handler_count_only(uuid);
		/* Zero matches means the requested UUID is unknown. */
		if (partition_count == 0) {
			return spmc_ffa_error_return(handle,
						FFA_ERROR_INVALID_PARAMETER);
		}
	} else {
		/*
		 * Stack-allocated staging area sized for the worst case:
		 * every SP/LP reported once per UUID it exposes.
		 */
		struct ffa_partition_info_v1_1
			partitions[MAX_SP_LP_PARTITIONS *
				   SPMC_AT_EL3_PARTITION_MAX_UUIDS];
		/*
		 * Handle the case where the partition descriptors are required,
		 * check we have the buffers available and populate the
		 * appropriate structure version.
		 */

		/* Obtain the v1.1 format of the descriptors. */
		ret = partition_info_get_handler_v1_1(uuid, partitions,
						      (MAX_SP_LP_PARTITIONS *
						      SPMC_AT_EL3_PARTITION_MAX_UUIDS),
						      &partition_count);

		/* Check if an error occurred during discovery. */
		if (ret != 0) {
			goto err;
		}

		/* If we didn't find any matches the UUID is unknown. */
		if (partition_count == 0) {
			ret = FFA_ERROR_INVALID_PARAMETER;
			goto err;
		}

		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
		mbox = spmc_get_mbox_desc(secure_origin);

		/*
		 * If the caller has not bothered registering its RX/TX pair
		 * then return an error code.
		 */
		spin_lock(&mbox->lock);
		if (mbox->rx_buffer == NULL) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Ensure the RX buffer is currently free. */
		if (mbox->state != MAILBOX_STATE_EMPTY) {
			ret = FFA_ERROR_BUSY;
			goto err_unlock;
		}

		/* Zero the RX buffer before populating. */
		(void)memset(mbox->rx_buffer, 0,
			     mbox->rxtx_page_count * FFA_PAGE_SIZE);

		/*
		 * Depending on the FF-A version of the requesting partition
		 * we may need to convert to a v1.0 format otherwise we can copy
		 * directly.
		 */
		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
			ret = partition_info_populate_v1_0(partitions,
							   mbox,
							   partition_count);
			if (ret != 0) {
				goto err_unlock;
			}
		} else {
			uint32_t buf_size = mbox->rxtx_page_count *
					    FFA_PAGE_SIZE;

			/* Ensure the descriptor will fit in the buffer. */
			size = sizeof(struct ffa_partition_info_v1_1);
			if (partition_count * size > buf_size) {
				ret = FFA_ERROR_NO_MEMORY;
				goto err_unlock;
			}
			memcpy(mbox->rx_buffer, partitions,
			       partition_count * size);
		}

		/* Mark the RX buffer full; the receiver releases it via FFA_RX_RELEASE. */
		mbox->state = MAILBOX_STATE_FULL;
		spin_unlock(&mbox->lock);
	}
	/* On success w2 holds the count and w3 the per-descriptor size (0 for count-only). */
	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);

	/* err_unlock is only reachable after mbox has been assigned and locked. */
err_unlock:
	spin_unlock(&mbox->lock);
err:
	return spmc_ffa_error_return(handle, ret);
}
1238
/* Return an FFA_SUCCESS response with the feature properties in w2. */
static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
{
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
}
1243
/*
 * FFA_FEATURES query for FFA_MEM_RETRIEVE_REQ, which carries an additional
 * input-properties parameter (the NS-bit request flag).
 */
static uint64_t ffa_features_retrieve_request(bool secure_origin,
					      uint32_t input_properties,
					      void *handle)
{
	/*
	 * If we're called by the normal world we don't support any
	 * additional features.
	 */
	if (!secure_origin) {
		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}

	} else {
		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
		/*
		 * If v1.1 or higher the NS bit must be set otherwise it is
		 * an invalid call. If v1.0 check and store whether the SP
		 * has requested the use of the NS bit.
		 */
		if (sp->ffa_version >= MAKE_FFA_VERSION(1, 1)) {
			if ((input_properties &
			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
				return spmc_ffa_error_return(handle,
						       FFA_ERROR_NOT_SUPPORTED);
			}
			/* v1.1+ SPs always get the NS bit echoed back. */
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		} else {
			sp->ns_bit_requested = (input_properties &
						FFA_FEATURES_RET_REQ_NS_BIT) !=
						0U;
		}
		/* Only reached for v1.0 SPs: echo the NS bit if requested. */
		if (sp->ns_bit_requested) {
			return ffa_feature_success(handle,
						   FFA_FEATURES_RET_REQ_NS_BIT);
		}
	}
	SMC_RET1(handle, FFA_SUCCESS_SMC32);
}
1285
/*
 * Handler for FFA_FEATURES: report whether a given FF-A function ID (or
 * Feature ID) is implemented by this SPMC for the calling world.
 */
static uint64_t ffa_features_handler(uint32_t smc_fid,
				     bool secure_origin,
				     uint64_t x1,
				     uint64_t x2,
				     uint64_t x3,
				     uint64_t x4,
				     void *cookie,
				     void *handle,
				     uint64_t flags)
{
	uint32_t function_id = (uint32_t) x1;
	uint32_t input_properties = (uint32_t) x2;

	/*
	 * Check if a Feature ID was requested: bit 31 clear means a Feature
	 * ID rather than a function ID.
	 */
	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
		/* We currently don't support any additional features. */
		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
	}

	/*
	 * Handle the cases where we have separate handlers due to additional
	 * properties.
	 */
	switch (function_id) {
	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return ffa_features_retrieve_request(secure_origin,
						     input_properties,
						     handle);
	}

	/*
	 * We don't currently support additional input properties for these
	 * other ABIs therefore ensure this value is set to 0.
	 */
	if (input_properties != 0U) {
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}

	/* Report if any other FF-A ABI is supported. */
	switch (function_id) {
	/* Supported features from both worlds. */
	case FFA_ERROR:
	case FFA_SUCCESS_SMC32:
	case FFA_INTERRUPT:
	case FFA_SPM_ID_GET:
	case FFA_ID_GET:
	case FFA_FEATURES:
	case FFA_VERSION:
	case FFA_RX_RELEASE:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
	case FFA_PARTITION_INFO_GET:
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
	case FFA_RXTX_UNMAP:
	case FFA_MEM_FRAG_TX:
	case FFA_MSG_RUN:

		/*
		 * We are relying on the fact that the other registers
		 * will be set to 0 as these values align with the
		 * currently implemented features of the SPMC. If this
		 * changes this function must be extended to handle
		 * reporting the additional functionality.
		 */

		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported ABIs only from the secure world. */
	case FFA_SECONDARY_EP_REGISTER_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
	case FFA_MEM_RELINQUISH:
	case FFA_MSG_WAIT:
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:

		if (!secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Supported features only from the normal world. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
	case FFA_MEM_RECLAIM:
	case FFA_MEM_FRAG_RX:

		if (secure_origin) {
			return spmc_ffa_error_return(handle,
						     FFA_ERROR_NOT_SUPPORTED);
		}
		SMC_RET1(handle, FFA_SUCCESS_SMC32);
		/* Execution stops here. */

	/* Anything else is not implemented by this SPMC. */
	default:
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_NOT_SUPPORTED);
	}
}
1395
ffa_id_get_handler(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)1396 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1397 bool secure_origin,
1398 uint64_t x1,
1399 uint64_t x2,
1400 uint64_t x3,
1401 uint64_t x4,
1402 void *cookie,
1403 void *handle,
1404 uint64_t flags)
1405 {
1406 if (secure_origin) {
1407 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1408 spmc_get_current_sp_ctx()->sp_id);
1409 } else {
1410 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1411 spmc_get_hyp_ctx()->ns_ep_id);
1412 }
1413 }
1414
1415 /*
1416 * Enable an SP to query the ID assigned to the SPMC.
1417 */
static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
				       bool secure_origin,
				       uint64_t x1,
				       uint64_t x2,
				       uint64_t x3,
				       uint64_t x4,
				       void *cookie,
				       void *handle,
				       uint64_t flags)
{
	/*
	 * FFA_SPM_ID_GET takes no arguments: x1-x7 must be zero. Debug
	 * builds trap violations here; release builds (NDEBUG) compile the
	 * asserts out and ignore the registers.
	 */
	assert(x1 == 0UL);
	assert(x2 == 0UL);
	assert(x3 == 0UL);
	assert(x4 == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);

	/* Return the SPMC's own ID in w2. */
	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
}
1438
/*
 * Handler for FFA_RUN: resume execution of a target SP vCPU on behalf of a
 * normal world caller.
 */
static uint64_t ffa_run_handler(uint32_t smc_fid,
				bool secure_origin,
				uint64_t x1,
				uint64_t x2,
				uint64_t x3,
				uint64_t x4,
				void *cookie,
				void *handle,
				uint64_t flags)
{
	struct secure_partition_desc *sp;
	uint16_t target_id = FFA_RUN_EP_ID(x1);
	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
	unsigned int idx;
	unsigned int *rt_state;
	unsigned int *rt_model;

	/* Can only be called from the normal world. */
	if (secure_origin) {
		ERROR("FFA_RUN can only be called from NWd.\n");
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Cannot run a Normal world partition. */
	if (ffa_is_normal_world_id(target_id)) {
		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	/* Check that the target SP exists. */
	sp = spmc_get_sp_ctx(target_id);
	if (sp == NULL) {
		ERROR("Unknown partition ID (0x%x).\n", target_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}

	idx = get_ec_index(sp);

	/* The requested vCPU must be the execution context for this core. */
	if (idx != vcpu_id) {
		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
		return spmc_ffa_error_return(handle,
					     FFA_ERROR_INVALID_PARAMETER);
	}
	/*
	 * rt_state is only lock-protected for S-EL0 SPs; S-EL1 SP state is
	 * accessed without the lock here — presumably safe because of how
	 * S-EL1 contexts are scheduled per-cpu (NOTE(review): confirm).
	 */
	if (sp->runtime_el == S_EL0) {
		spin_lock(&sp->rt_state_lock);
	}
	rt_state = &((sp->ec[idx]).rt_state);
	rt_model = &((sp->ec[idx]).rt_model);
	if (*rt_state == RT_STATE_RUNNING) {
		if (sp->runtime_el == S_EL0) {
			spin_unlock(&sp->rt_state_lock);
		}
		ERROR("Partition (0x%x) is already running.\n", target_id);
		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
	}

	/*
	 * Sanity check that if the execution context was not waiting then it
	 * was either in the direct request or the run partition runtime model.
	 */
	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
		assert(*rt_model == RT_MODEL_RUN ||
		       *rt_model == RT_MODEL_DIR_REQ);
	}

	/*
	 * If the context was waiting then update the partition runtime model.
	 */
	if (*rt_state == RT_STATE_WAITING) {
		*rt_model = RT_MODEL_RUN;
	}

	/*
	 * Forward the request to the correct SP vCPU after updating
	 * its state.
	 */
	*rt_state = RT_STATE_RUNNING;

	if (sp->runtime_el == S_EL0) {
		spin_unlock(&sp->rt_state_lock);
	}

	/* Perform the world switch into the target SP. */
	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
			       handle, cookie, flags, target_id);
}
1527
rx_release_handler(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)1528 static uint64_t rx_release_handler(uint32_t smc_fid,
1529 bool secure_origin,
1530 uint64_t x1,
1531 uint64_t x2,
1532 uint64_t x3,
1533 uint64_t x4,
1534 void *cookie,
1535 void *handle,
1536 uint64_t flags)
1537 {
1538 struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1539
1540 spin_lock(&mbox->lock);
1541
1542 if (mbox->state != MAILBOX_STATE_FULL) {
1543 spin_unlock(&mbox->lock);
1544 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1545 }
1546
1547 mbox->state = MAILBOX_STATE_EMPTY;
1548 spin_unlock(&mbox->lock);
1549
1550 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1551 }
1552
spmc_ffa_console_log(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)1553 static uint64_t spmc_ffa_console_log(uint32_t smc_fid,
1554 bool secure_origin,
1555 uint64_t x1,
1556 uint64_t x2,
1557 uint64_t x3,
1558 uint64_t x4,
1559 void *cookie,
1560 void *handle,
1561 uint64_t flags)
1562 {
1563 /* Maximum number of characters is 48: 6 registers of 8 bytes each. */
1564 char chars[48] = {0};
1565 size_t chars_max;
1566 size_t chars_count = x1;
1567
1568 /* Does not support request from Nwd. */
1569 if (!secure_origin) {
1570 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1571 }
1572
1573 assert(smc_fid == FFA_CONSOLE_LOG_SMC32 || smc_fid == FFA_CONSOLE_LOG_SMC64);
1574 if (smc_fid == FFA_CONSOLE_LOG_SMC32) {
1575 uint32_t *registers = (uint32_t *)chars;
1576 registers[0] = (uint32_t)x2;
1577 registers[1] = (uint32_t)x3;
1578 registers[2] = (uint32_t)x4;
1579 registers[3] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X5);
1580 registers[4] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X6);
1581 registers[5] = (uint32_t)SMC_GET_GP(handle, CTX_GPREG_X7);
1582 chars_max = 6 * sizeof(uint32_t);
1583 } else {
1584 uint64_t *registers = (uint64_t *)chars;
1585 registers[0] = x2;
1586 registers[1] = x3;
1587 registers[2] = x4;
1588 registers[3] = SMC_GET_GP(handle, CTX_GPREG_X5);
1589 registers[4] = SMC_GET_GP(handle, CTX_GPREG_X6);
1590 registers[5] = SMC_GET_GP(handle, CTX_GPREG_X7);
1591 chars_max = 6 * sizeof(uint64_t);
1592 }
1593
1594 if ((chars_count == 0) || (chars_count > chars_max)) {
1595 return spmc_ffa_error_return(handle, FFA_ERROR_INVALID_PARAMETER);
1596 }
1597
1598 for (size_t i = 0; (i < chars_count) && (chars[i] != '\0'); i++) {
1599 putchar(chars[i]);
1600 }
1601
1602 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1603 }
1604
1605 /*
1606 * Perform initial validation on the provided secondary entry point.
1607 * For now ensure it does not lie within the BL31 Image or the SP's
1608 * RX/TX buffers as these are mapped within EL3.
1609 * TODO: perform validation for additional invalid memory regions.
1610 */
validate_secondary_ep(uintptr_t ep,struct secure_partition_desc * sp)1611 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1612 {
1613 struct mailbox *mb;
1614 uintptr_t buffer_size;
1615 uintptr_t sp_rx_buffer;
1616 uintptr_t sp_tx_buffer;
1617 uintptr_t sp_rx_buffer_limit;
1618 uintptr_t sp_tx_buffer_limit;
1619
1620 mb = &sp->mailbox;
1621 buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1622 sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1623 sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1624 sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1625 sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1626
1627 /*
1628 * Check if the entry point lies within BL31, or the
1629 * SP's RX or TX buffer.
1630 */
1631 if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1632 (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1633 (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1634 return -EINVAL;
1635 }
1636 return 0;
1637 }
1638
1639 /*******************************************************************************
1640 * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1641 * register an entry point for initialization during a secondary cold boot.
1642 ******************************************************************************/
ffa_sec_ep_register_handler(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)1643 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1644 bool secure_origin,
1645 uint64_t x1,
1646 uint64_t x2,
1647 uint64_t x3,
1648 uint64_t x4,
1649 void *cookie,
1650 void *handle,
1651 uint64_t flags)
1652 {
1653 struct secure_partition_desc *sp;
1654 struct sp_exec_ctx *sp_ctx;
1655
1656 /* This request cannot originate from the Normal world. */
1657 if (!secure_origin) {
1658 WARN("%s: Can only be called from SWd.\n", __func__);
1659 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1660 }
1661
1662 /* Get the context of the current SP. */
1663 sp = spmc_get_current_sp_ctx();
1664 if (sp == NULL) {
1665 WARN("%s: Cannot find SP context.\n", __func__);
1666 return spmc_ffa_error_return(handle,
1667 FFA_ERROR_INVALID_PARAMETER);
1668 }
1669
1670 /* Only an S-EL1 SP should be invoking this ABI. */
1671 if (sp->runtime_el != S_EL1) {
1672 WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1673 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1674 }
1675
1676 /* Ensure the SP is in its initialization state. */
1677 sp_ctx = spmc_get_sp_ec(sp);
1678 if (sp_ctx->rt_model != RT_MODEL_INIT) {
1679 WARN("%s: Can only be called during SP initialization.\n",
1680 __func__);
1681 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1682 }
1683
1684 /* Perform initial validation of the secondary entry point. */
1685 if (validate_secondary_ep(x1, sp)) {
1686 WARN("%s: Invalid entry point provided (0x%lx).\n",
1687 __func__, x1);
1688 return spmc_ffa_error_return(handle,
1689 FFA_ERROR_INVALID_PARAMETER);
1690 }
1691
1692 /*
1693 * Update the secondary entrypoint in SP context.
1694 * We don't need a lock here as during partition initialization there
1695 * will only be a single core online.
1696 */
1697 sp->secondary_ep = x1;
1698 VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1699
1700 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1701 }
1702
1703 /*******************************************************************************
1704 * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1705 * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1706 * function converts a permission value from the FF-A format to the mmap_attr_t
1707 * format by setting MT_RW/MT_RO, MT_USER/MT_PRIVILEGED and
1708 * MT_EXECUTE/MT_EXECUTE_NEVER. The other fields are left as 0 because they are
1709 * ignored by the function xlat_change_mem_attributes_ctx().
1710 ******************************************************************************/
ffa_perm_to_mmap_perm(unsigned int perms)1711 static unsigned int ffa_perm_to_mmap_perm(unsigned int perms)
1712 {
1713 unsigned int tf_attr = 0U;
1714 unsigned int access;
1715
1716 /* Deal with data access permissions first. */
1717 access = (perms & FFA_MEM_PERM_DATA_MASK) >> FFA_MEM_PERM_DATA_SHIFT;
1718
1719 switch (access) {
1720 case FFA_MEM_PERM_DATA_RW:
1721 /* Return 0 if the execute is set with RW. */
1722 if ((perms & FFA_MEM_PERM_INST_NON_EXEC) != 0) {
1723 tf_attr |= MT_RW | MT_USER | MT_EXECUTE_NEVER;
1724 }
1725 break;
1726
1727 case FFA_MEM_PERM_DATA_RO:
1728 tf_attr |= MT_RO | MT_USER;
1729 /* Deal with the instruction access permissions next. */
1730 if ((perms & FFA_MEM_PERM_INST_NON_EXEC) == 0) {
1731 tf_attr |= MT_EXECUTE;
1732 } else {
1733 tf_attr |= MT_EXECUTE_NEVER;
1734 }
1735 break;
1736
1737 case FFA_MEM_PERM_DATA_NA:
1738 default:
1739 return tf_attr;
1740 }
1741
1742 return tf_attr;
1743 }
1744
1745 /*******************************************************************************
1746 * Handler to set the permissions of a set of contiguous pages of a S-EL0 SP
1747 ******************************************************************************/
ffa_mem_perm_set_handler(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)1748 static uint64_t ffa_mem_perm_set_handler(uint32_t smc_fid,
1749 bool secure_origin,
1750 uint64_t x1,
1751 uint64_t x2,
1752 uint64_t x3,
1753 uint64_t x4,
1754 void *cookie,
1755 void *handle,
1756 uint64_t flags)
1757 {
1758 struct secure_partition_desc *sp;
1759 unsigned int idx;
1760 uintptr_t base_va = (uintptr_t) x1;
1761 size_t size = (size_t)(x2 * PAGE_SIZE);
1762 uint32_t tf_attr;
1763 int ret;
1764
1765 /* This request cannot originate from the Normal world. */
1766 if (!secure_origin) {
1767 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1768 }
1769
1770 if (size == 0) {
1771 return spmc_ffa_error_return(handle,
1772 FFA_ERROR_INVALID_PARAMETER);
1773 }
1774
1775 /* Get the context of the current SP. */
1776 sp = spmc_get_current_sp_ctx();
1777 if (sp == NULL) {
1778 return spmc_ffa_error_return(handle,
1779 FFA_ERROR_INVALID_PARAMETER);
1780 }
1781
1782 /* A S-EL1 SP has no business invoking this ABI. */
1783 if (sp->runtime_el == S_EL1) {
1784 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1785 }
1786
1787 if ((x3 & ~((uint64_t)FFA_MEM_PERM_MASK)) != 0) {
1788 return spmc_ffa_error_return(handle,
1789 FFA_ERROR_INVALID_PARAMETER);
1790 }
1791
1792 /* Get the execution context of the calling SP. */
1793 idx = get_ec_index(sp);
1794
1795 /*
1796 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1797 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1798 * and can only be initialising on this cpu.
1799 */
1800 if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1801 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1802 }
1803
1804 VERBOSE("Setting memory permissions:\n");
1805 VERBOSE(" Start address : 0x%lx\n", base_va);
1806 VERBOSE(" Number of pages: %lu (%zu bytes)\n", x2, size);
1807 VERBOSE(" Attributes : 0x%x\n", (uint32_t)x3);
1808
1809 /* Convert inbound permissions to TF-A permission attributes */
1810 tf_attr = ffa_perm_to_mmap_perm((unsigned int)x3);
1811 if (tf_attr == 0U) {
1812 return spmc_ffa_error_return(handle,
1813 FFA_ERROR_INVALID_PARAMETER);
1814 }
1815
1816 /* Request the change in permissions */
1817 ret = xlat_change_mem_attributes_ctx(sp->xlat_ctx_handle,
1818 base_va, size, tf_attr);
1819 if (ret != 0) {
1820 return spmc_ffa_error_return(handle,
1821 FFA_ERROR_INVALID_PARAMETER);
1822 }
1823
1824 SMC_RET1(handle, FFA_SUCCESS_SMC32);
1825 }
1826
1827 /*******************************************************************************
1828 * Permissions are encoded using a different format in the FFA_MEM_PERM_* ABIs
1829 * than in the Trusted Firmware, where the mmap_attr_t enum type is used. This
1830 * function converts a permission value from the mmap_attr_t format to the FF-A
1831 * format.
1832 ******************************************************************************/
mmap_perm_to_ffa_perm(unsigned int attr)1833 static unsigned int mmap_perm_to_ffa_perm(unsigned int attr)
1834 {
1835 unsigned int perms = 0U;
1836 unsigned int data_access;
1837
1838 if ((attr & MT_USER) == 0) {
1839 /* No access from EL0. */
1840 data_access = FFA_MEM_PERM_DATA_NA;
1841 } else {
1842 if ((attr & MT_RW) != 0) {
1843 data_access = FFA_MEM_PERM_DATA_RW;
1844 } else {
1845 data_access = FFA_MEM_PERM_DATA_RO;
1846 }
1847 }
1848
1849 perms |= (data_access & FFA_MEM_PERM_DATA_MASK)
1850 << FFA_MEM_PERM_DATA_SHIFT;
1851
1852 if ((attr & MT_EXECUTE_NEVER) != 0U) {
1853 perms |= FFA_MEM_PERM_INST_NON_EXEC;
1854 }
1855
1856 return perms;
1857 }
1858
1859 /*******************************************************************************
1860 * Handler to get the permissions of a set of contiguous pages of a S-EL0 SP
1861 ******************************************************************************/
ffa_mem_perm_get_handler(uint32_t smc_fid,bool secure_origin,uint64_t x1,uint64_t x2,uint64_t x3,uint64_t x4,void * cookie,void * handle,uint64_t flags)1862 static uint64_t ffa_mem_perm_get_handler(uint32_t smc_fid,
1863 bool secure_origin,
1864 uint64_t x1,
1865 uint64_t x2,
1866 uint64_t x3,
1867 uint64_t x4,
1868 void *cookie,
1869 void *handle,
1870 uint64_t flags)
1871 {
1872 struct secure_partition_desc *sp;
1873 unsigned int idx;
1874 uintptr_t base_va = (uintptr_t)x1;
1875 uint32_t tf_attr = 0;
1876 int ret;
1877
1878 /* This request cannot originate from the Normal world. */
1879 if (!secure_origin) {
1880 return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1881 }
1882
1883 /* Get the context of the current SP. */
1884 sp = spmc_get_current_sp_ctx();
1885 if (sp == NULL) {
1886 return spmc_ffa_error_return(handle,
1887 FFA_ERROR_INVALID_PARAMETER);
1888 }
1889
1890 /* A S-EL1 SP has no business invoking this ABI. */
1891 if (sp->runtime_el == S_EL1) {
1892 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1893 }
1894
1895 /* Get the execution context of the calling SP. */
1896 idx = get_ec_index(sp);
1897
1898 /*
1899 * Ensure that the S-EL0 SP is initialising itself. We do not need to
1900 * synchronise this operation through a spinlock since a S-EL0 SP is UP
1901 * and can only be initialising on this cpu.
1902 */
1903 if (sp->ec[idx].rt_model != RT_MODEL_INIT) {
1904 return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1905 }
1906
1907 /* Request the permissions */
1908 ret = xlat_get_mem_attributes_ctx(sp->xlat_ctx_handle, base_va, &tf_attr);
1909 if (ret != 0) {
1910 return spmc_ffa_error_return(handle,
1911 FFA_ERROR_INVALID_PARAMETER);
1912 }
1913
1914 /* Convert TF-A permission to FF-A permissions attributes. */
1915 x2 = mmap_perm_to_ffa_perm(tf_attr);
1916
1917 SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, x2);
1918 }
1919
/*******************************************************************************
 * This function will parse the Secure Partition Manifest. From manifest, it
 * will fetch details for preparing Secure partition image context and secure
 * partition image boot arguments if any.
 *
 * sp_manifest:   pointer to the manifest DTB (already mapped at EL3).
 * offset:        manifest node offset supplied by the caller. NOTE(review):
 *                currently unused — the root node is re-derived from "/"
 *                below; confirm whether it should be used instead.
 * sp:            secure partition descriptor populated from the manifest.
 * ep_info:       SP image entry point info. NOTE(review): not written by this
 *                function; presumably reserved for boot-argument parsing.
 * boot_info_reg: out-parameter set to the GP register (0-3) that should carry
 *                boot information; left untouched if the manifest does not
 *                provide a valid "gp-register-num".
 *
 * Returns 0 on success, a negative FDT or errno-style error code otherwise.
 ******************************************************************************/
static int sp_manifest_parse(void *sp_manifest, int offset,
			     struct secure_partition_desc *sp,
			     entry_point_info_t *ep_info,
			     int32_t *boot_info_reg)
{
	int32_t ret, node;
	uint32_t config_32;
	int uuid_size;
	const fdt32_t *prop;
	int i;

	/*
	 * Look for the mandatory fields that are expected to be present in
	 * the SP manifests.
	 */
	node = fdt_path_offset(sp_manifest, "/");
	if (node < 0) {
		ERROR("Did not find root node.\n");
		return node;
	}

	/* "uuid" may hold a list of UUIDs; uuid_size is its byte length. */
	prop = fdt_getprop(sp_manifest, node, "uuid", &uuid_size);
	if (prop == NULL) {
		ERROR("Couldn't find property uuid in manifest\n");
		return -FDT_ERR_NOTFOUND;
	}

	/* Clamp to the descriptor's capacity; excess UUIDs are dropped. */
	if (uuid_size > sizeof(sp->uuid_array)) {
		ERROR("Too many UUIDs in manifest, truncating list\n");
		uuid_size = sizeof(sp->uuid_array);
	}

	sp->num_uuids = uuid_size / sizeof(struct ffa_uuid);
	/* Read the (possibly truncated) UUID list as raw 32-bit words. */
	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
				    (uuid_size / sizeof(uint32_t)),
				    sp->uuid_array[0].uuid);
	if (ret != 0) {
		ERROR("Missing Secure Partition UUID.\n");
		return ret;
	}

	/* Reject manifests that list the same UUID more than once. */
	for (i = 0; i < sp->num_uuids; i++) {
		int j;
		for (j = 0; j < i; j++) {
			if (memcmp(&sp->uuid_array[i], &sp->uuid_array[j], sizeof(struct ffa_uuid)) == 0) {
				ERROR("Duplicate UUIDs in manifest: 0x%x 0x%x 0x%x 0x%x\n",
				      sp->uuid_array[i].uuid[0], sp->uuid_array[i].uuid[1],
				      sp->uuid_array[i].uuid[2], sp->uuid_array[i].uuid[3]);
				return -FDT_ERR_BADVALUE;
			}
		}
	}

	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
	if (ret != 0) {
		ERROR("Missing SP Exception Level information.\n");
		return ret;
	}

	sp->runtime_el = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition FF-A Version.\n");
		return ret;
	}

	sp->ffa_version = config_32;

	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition Execution State.\n");
		return ret;
	}

	sp->execution_state = config_32;

	ret = fdt_read_uint32(sp_manifest, node,
			      "messaging-method", &config_32);
	if (ret != 0) {
		ERROR("Missing Secure Partition messaging method.\n");
		return ret;
	}

	/* Validate this entry, we currently only support direct messaging. */
	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
			   FFA_PARTITION_DIRECT_REQ_SEND |
			   FFA_PARTITION_DIRECT_REQ2_RECV |
			   FFA_PARTITION_DIRECT_REQ2_SEND)) != 0U) {
		WARN("Invalid Secure Partition messaging method (0x%x)\n",
		     config_32);
		return -EINVAL;
	}

	sp->properties = config_32;

	/*
	 * VM availability messages are optional, but when requested they
	 * require direct message receive support to be delivered.
	 */
	ret = fdt_read_uint32(sp_manifest, node,
			      "vm-availability-messages", &config_32);
	if (ret != 0) {
		WARN("Missing VM availability messaging.\n");
	} else if ((sp->properties & FFA_PARTITION_DIRECT_REQ_RECV) == 0) {
		ERROR("VM availability messaging requested without "
		      "direct message receive support.\n");
		return -EINVAL;
	} else {
		/* Validate this entry. */
		if ((config_32 & ~(FFA_VM_AVAILABILITY_CREATED |
				   FFA_VM_AVAILABILITY_DESTROYED)) != 0U) {
			WARN("Invalid VM availability messaging (0x%x)\n",
			     config_32);
			return -EINVAL;
		}

		if ((config_32 & FFA_VM_AVAILABILITY_CREATED) != 0U) {
			sp->properties |= FFA_PARTITION_VM_CREATED;
		}
		if ((config_32 & FFA_VM_AVAILABILITY_DESTROYED) != 0U) {
			sp->properties |= FFA_PARTITION_VM_DESTROYED;
		}
	}

	ret = fdt_read_uint32(sp_manifest, node,
			      "execution-ctx-count", &config_32);

	if (ret != 0) {
		ERROR("Missing SP Execution Context Count.\n");
		return ret;
	}

	/*
	 * Ensure this field is set correctly in the manifest however
	 * since this is currently a hardcoded value for S-EL1 partitions
	 * we don't need to save it here, just validate.
	 */
	if ((sp->runtime_el == S_EL1) && (config_32 != PLATFORM_CORE_COUNT)) {
		ERROR("SP Execution Context Count (%u) must be %u.\n",
			config_32, PLATFORM_CORE_COUNT);
		return -EINVAL;
	}

	/*
	 * Look for the optional fields that are expected to be present in
	 * an SP manifest.
	 */
	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
	if (ret != 0) {
		WARN("Missing Secure Partition ID.\n");
	} else {
		if (!is_ffa_secure_id_valid(config_32)) {
			ERROR("Invalid Secure Partition ID (0x%x).\n",
			      config_32);
			return -EINVAL;
		}
		sp->sp_id = config_32;
	}

	ret = fdt_read_uint32(sp_manifest, node,
			      "power-management-messages", &config_32);
	if (ret != 0) {
		WARN("Missing Power Management Messages entry.\n");
	} else {
		/* S-EL0 partitions may not subscribe to power messages. */
		if ((sp->runtime_el == S_EL0) && (config_32 != 0)) {
			ERROR("Power messages not supported for S-EL0 SP\n");
			return -EINVAL;
		}

		/*
		 * Ensure only the currently supported power messages have
		 * been requested.
		 */
		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
				  FFA_PM_MSG_SUB_CPU_SUSPEND |
				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
			ERROR("Requested unsupported PM messages (%x)\n",
			      config_32);
			return -EINVAL;
		}
		sp->pwr_mgmt_msgs = config_32;
	}

	ret = fdt_read_uint32(sp_manifest, node,
			      "gp-register-num", &config_32);
	if (ret != 0) {
		WARN("Missing boot information register.\n");
	} else {
		/* Check if a register number between 0-3 is specified. */
		if (config_32 < 4) {
			*boot_info_reg = config_32;
		} else {
			WARN("Incorrect boot information register (%u).\n",
			     config_32);
		}
	}

	return 0;
}
2121
2122 /*******************************************************************************
2123 * This function gets the Secure Partition Manifest base and maps the manifest
2124 * region.
2125 * Currently only one Secure Partition manifest is considered which is used to
2126 * prepare the context for the single Secure Partition.
2127 ******************************************************************************/
find_and_prepare_sp_context(void)2128 static int find_and_prepare_sp_context(void)
2129 {
2130 void *sp_manifest;
2131 uintptr_t manifest_base;
2132 uintptr_t manifest_base_align;
2133 entry_point_info_t *next_image_ep_info;
2134 int32_t ret, boot_info_reg = -1;
2135 struct secure_partition_desc *sp;
2136
2137 next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
2138 if (next_image_ep_info == NULL) {
2139 WARN("No Secure Partition image provided by BL2.\n");
2140 return -ENOENT;
2141 }
2142
2143 sp_manifest = (void *)next_image_ep_info->args.arg0;
2144 if (sp_manifest == NULL) {
2145 WARN("Secure Partition manifest absent.\n");
2146 return -ENOENT;
2147 }
2148
2149 manifest_base = (uintptr_t)sp_manifest;
2150 manifest_base_align = page_align(manifest_base, DOWN);
2151
2152 /*
2153 * Map the secure partition manifest region in the EL3 translation
2154 * regime.
2155 * Map an area equal to (2 * PAGE_SIZE) for now. During manifest base
2156 * alignment the region of 1 PAGE_SIZE from manifest align base may
2157 * not completely accommodate the secure partition manifest region.
2158 */
2159 ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
2160 manifest_base_align,
2161 PAGE_SIZE * 2,
2162 MT_RO_DATA);
2163 if (ret != 0) {
2164 ERROR("Error while mapping SP manifest (%d).\n", ret);
2165 return ret;
2166 }
2167
2168 ret = fdt_node_offset_by_compatible(sp_manifest, -1,
2169 "arm,ffa-manifest-1.0");
2170 if (ret < 0) {
2171 ERROR("Error happened in SP manifest reading.\n");
2172 return -EINVAL;
2173 }
2174
2175 /*
2176 * Store the size of the manifest so that it can be used later to pass
2177 * the manifest as boot information later.
2178 */
2179 next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
2180 INFO("Manifest adr = %lx , size = %lu bytes\n", manifest_base,
2181 next_image_ep_info->args.arg1);
2182
2183 /*
2184 * Select an SP descriptor for initialising the partition's execution
2185 * context on the primary CPU.
2186 */
2187 sp = spmc_get_current_sp_ctx();
2188
2189 #if SPMC_AT_EL3_SEL0_SP
2190 /* Assign translation tables context. */
2191 sp_desc->xlat_ctx_handle = spm_get_sp_xlat_context();
2192
2193 #endif /* SPMC_AT_EL3_SEL0_SP */
2194 /* Initialize entry point information for the SP */
2195 SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
2196 SECURE | EP_ST_ENABLE);
2197
2198 /* Parse the SP manifest. */
2199 ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
2200 &boot_info_reg);
2201 if (ret != 0) {
2202 ERROR("Error in Secure Partition manifest parsing.\n");
2203 return ret;
2204 }
2205
2206 /* Check that the runtime EL in the manifest was correct. */
2207 if (sp->runtime_el != S_EL0 && sp->runtime_el != S_EL1) {
2208 ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
2209 return -EINVAL;
2210 }
2211
2212 /* Perform any common initialisation. */
2213 spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
2214
2215 /* Perform any initialisation specific to S-EL1 SPs. */
2216 if (sp->runtime_el == S_EL1) {
2217 spmc_el1_sp_setup(sp, next_image_ep_info);
2218 }
2219
2220 #if SPMC_AT_EL3_SEL0_SP
2221 /* Setup spsr in endpoint info for common context management routine. */
2222 if (sp->runtime_el == S_EL0) {
2223 spmc_el0_sp_spsr_setup(next_image_ep_info);
2224 }
2225 #endif /* SPMC_AT_EL3_SEL0_SP */
2226
2227 /* Initialize the SP context with the required ep info. */
2228 spmc_sp_common_ep_commit(sp, next_image_ep_info);
2229
2230 #if SPMC_AT_EL3_SEL0_SP
2231 /*
2232 * Perform any initialisation specific to S-EL0 not set by common
2233 * context management routine.
2234 */
2235 if (sp->runtime_el == S_EL0) {
2236 spmc_el0_sp_setup(sp, boot_info_reg, sp_manifest);
2237 }
2238 #endif /* SPMC_AT_EL3_SEL0_SP */
2239 return 0;
2240 }
2241
2242 /*******************************************************************************
2243 * This function takes an SP context pointer and performs a synchronous entry
2244 * into it.
2245 ******************************************************************************/
logical_sp_init(void)2246 static int32_t logical_sp_init(void)
2247 {
2248 int32_t rc = 0;
2249 struct el3_lp_desc *el3_lp_descs;
2250
2251 /* Perform initial validation of the Logical Partitions. */
2252 rc = el3_sp_desc_validate();
2253 if (rc != 0) {
2254 ERROR("Logical Partition validation failed!\n");
2255 return rc;
2256 }
2257
2258 el3_lp_descs = get_el3_lp_array();
2259
2260 INFO("Logical Secure Partition init start.\n");
2261 for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
2262 rc = el3_lp_descs[i].init();
2263 if (rc != 0) {
2264 ERROR("Logical SP (0x%x) Failed to Initialize\n",
2265 el3_lp_descs[i].sp_id);
2266 return rc;
2267 }
2268 VERBOSE("Logical SP (0x%x) Initialized\n",
2269 el3_lp_descs[i].sp_id);
2270 }
2271
2272 INFO("Logical Secure Partition init completed.\n");
2273
2274 return rc;
2275 }
2276
/*******************************************************************************
 * Perform a synchronous entry into the SP execution context 'ec' on the
 * current CPU and return the value handed back when the SP winds back to EL3.
 * The statement order below is deliberate: the SECURE context must be
 * installed and restored before the ERET, and saved again after the SP exits.
 ******************************************************************************/
uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
{
	uint64_t rc;

	assert(ec != NULL);

	/* Assign the context of the SP to this CPU */
	cm_set_context(&(ec->cpu_ctx), SECURE);

	/* Restore the context assigned above */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);

	/* Invalidate TLBs at EL1. */
	/* NOTE(review): presumably to drop stale EL1 translations before the
	 * SP runs — confirm against the context management design. */
	tlbivmalle1();
	dsbish();

	/* Enter Secure Partition */
	rc = spm_secure_partition_enter(&ec->c_rt_ctx);

	/* Save secure state */
	cm_el1_sysregs_context_save(SECURE);

	return rc;
}
2302
2303 /*******************************************************************************
2304 * SPMC Helper Functions.
2305 ******************************************************************************/
sp_init(void)2306 static int32_t sp_init(void)
2307 {
2308 uint64_t rc;
2309 struct secure_partition_desc *sp;
2310 struct sp_exec_ctx *ec;
2311
2312 sp = spmc_get_current_sp_ctx();
2313 ec = spmc_get_sp_ec(sp);
2314 ec->rt_model = RT_MODEL_INIT;
2315 ec->rt_state = RT_STATE_RUNNING;
2316
2317 INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
2318
2319 rc = spmc_sp_synchronous_entry(ec);
2320 if (rc != 0) {
2321 /* Indicate SP init was not successful. */
2322 ERROR("SP (0x%x) failed to initialize (%lu).\n",
2323 sp->sp_id, rc);
2324 return 0;
2325 }
2326
2327 ec->rt_state = RT_STATE_WAITING;
2328 INFO("Secure Partition initialized.\n");
2329
2330 return 1;
2331 }
2332
initalize_sp_descs(void)2333 static void initalize_sp_descs(void)
2334 {
2335 struct secure_partition_desc *sp;
2336
2337 for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
2338 sp = &sp_desc[i];
2339 sp->sp_id = INV_SP_ID;
2340 sp->mailbox.rx_buffer = NULL;
2341 sp->mailbox.tx_buffer = NULL;
2342 sp->mailbox.state = MAILBOX_STATE_EMPTY;
2343 sp->secondary_ep = 0;
2344 }
2345 }
2346
initalize_ns_ep_descs(void)2347 static void initalize_ns_ep_descs(void)
2348 {
2349 struct ns_endpoint_desc *ns_ep;
2350
2351 for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
2352 ns_ep = &ns_ep_desc[i];
2353 /*
2354 * Clashes with the Hypervisor ID but will not be a
2355 * problem in practice.
2356 */
2357 ns_ep->ns_ep_id = 0;
2358 ns_ep->ffa_version = 0;
2359 ns_ep->mailbox.rx_buffer = NULL;
2360 ns_ep->mailbox.tx_buffer = NULL;
2361 ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
2362 }
2363 }
2364
2365 /*******************************************************************************
2366 * Initialize SPMC attributes for the SPMD.
2367 ******************************************************************************/
spmc_populate_attrs(spmc_manifest_attribute_t * spmc_attrs)2368 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
2369 {
2370 spmc_attrs->major_version = FFA_VERSION_MAJOR;
2371 spmc_attrs->minor_version = FFA_VERSION_MINOR;
2372 spmc_attrs->exec_state = MODE_RW_64;
2373 spmc_attrs->spmc_id = FFA_SPMC_ID;
2374 spmc_attrs->sp_ffa_version = spmc_get_current_sp_ctx()->ffa_version;
2375 }
2376
/*******************************************************************************
 * Initialize contexts of all Secure Partitions.
 *
 * Called once during BL31 setup: resets the endpoint descriptors, prepares
 * the shared memory datastore, initialises EL3 Logical Partitions, prepares
 * the physical SP context and registers the PM and S-EL1 interrupt hooks.
 *
 * Returns 0 on success, a negative error code otherwise; panics if the
 * interrupt handler cannot be registered.
 ******************************************************************************/
int32_t spmc_setup(void)
{
	int32_t ret;
	uint32_t flags;

	/* Initialize endpoint descriptors */
	initalize_sp_descs();
	initalize_ns_ep_descs();

	/*
	 * Retrieve the information of the datastore for tracking shared memory
	 * requests allocated by platform code and zero the region if available.
	 */
	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
					    &spmc_shmem_obj_state.data_size);
	if (ret != 0) {
		ERROR("Failed to obtain memory descriptor backing store!\n");
		return ret;
	}
	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);

	/* Setup logical SPs. */
	ret = logical_sp_init();
	if (ret != 0) {
		ERROR("Failed to initialize Logical Partitions.\n");
		return ret;
	}

	/* Perform physical SP setup. */

	/* Disable MMU at EL1 (initialized by BL2) */
	disable_mmu_icache_el1();

	/* Initialize context of the SP */
	INFO("Secure Partition context setup start.\n");

	ret = find_and_prepare_sp_context();
	if (ret != 0) {
		ERROR("Error in SP finding and context preparation.\n");
		return ret;
	}

	/* Register power management hooks with PSCI */
	psci_register_spd_pm_hook(&spmc_pm);

	/*
	 * Register an interrupt handler for S-EL1 interrupts
	 * when generated during code executing in the
	 * non-secure state.
	 */
	flags = 0;
	set_interrupt_rm_flag(flags, NON_SECURE);
	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
					      spmc_sp_interrupt_handler,
					      flags);
	if (ret != 0) {
		/* Without S-EL1 interrupt routing the SPMC cannot operate. */
		ERROR("Failed to register interrupt handler! (%d)\n", ret);
		panic();
	}

	/* Register init function for deferred init. */
	bl31_register_bl32_init(&sp_init);

	INFO("Secure Partition setup done.\n");

	return 0;
}
2447
/*******************************************************************************
 * Secure Partition Manager SMC handler.
 *
 * Dispatches each FF-A function ID to its dedicated handler; all handlers
 * share the standard runtime service prototype. Unrecognised FIDs return
 * FFA_ERROR_NOT_SUPPORTED to the caller.
 ******************************************************************************/
uint64_t spmc_smc_handler(uint32_t smc_fid,
			  bool secure_origin,
			  uint64_t x1,
			  uint64_t x2,
			  uint64_t x3,
			  uint64_t x4,
			  void *cookie,
			  void *handle,
			  uint64_t flags)
{
	switch (smc_fid) {

	/* Setup and discovery ABIs. */
	case FFA_VERSION:
		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
					   x4, cookie, handle, flags);

	case FFA_SPM_ID_GET:
		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_ID_GET:
		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_FEATURES:
		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_SECONDARY_EP_REGISTER_SMC64:
		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
						   x2, x3, x4, cookie, handle,
						   flags);

	/* Direct messaging ABIs (32/64-bit and v1.2 REQ2/RESP2 variants). */
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ2_SMC64:
		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
					      x3, x4, cookie, handle, flags);

	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
	case FFA_MSG_SEND_DIRECT_RESP2_SMC64:
		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	/* RX/TX buffer management. */
	case FFA_RXTX_MAP_SMC32:
	case FFA_RXTX_MAP_SMC64:
		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_RXTX_UNMAP:
		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	case FFA_PARTITION_INFO_GET:
		return partition_info_get_handler(smc_fid, secure_origin, x1,
						  x2, x3, x4, cookie, handle,
						  flags);

	case FFA_RX_RELEASE:
		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
					  x4, cookie, handle, flags);

	/* Scheduling and run-state ABIs. */
	case FFA_MSG_WAIT:
		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					cookie, handle, flags);

	case FFA_ERROR:
		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	case FFA_MSG_RUN:
		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
				       cookie, handle, flags);

	/* Memory management ABIs. */
	case FFA_MEM_SHARE_SMC32:
	case FFA_MEM_SHARE_SMC64:
	case FFA_MEM_LEND_SMC32:
	case FFA_MEM_LEND_SMC64:
		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
					 cookie, handle, flags);

	case FFA_MEM_FRAG_TX:
		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_FRAG_RX:
		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_RETRIEVE_REQ_SMC32:
	case FFA_MEM_RETRIEVE_REQ_SMC64:
		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
						 x3, x4, cookie, handle, flags);

	case FFA_MEM_RELINQUISH:
		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
					       x3, x4, cookie, handle, flags);

	case FFA_MEM_RECLAIM:
		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);
	case FFA_CONSOLE_LOG_SMC32:
	case FFA_CONSOLE_LOG_SMC64:
		return spmc_ffa_console_log(smc_fid, secure_origin, x1, x2, x3,
					    x4, cookie, handle, flags);

	case FFA_MEM_PERM_GET:
		return ffa_mem_perm_get_handler(smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);

	case FFA_MEM_PERM_SET:
		return ffa_mem_perm_set_handler(smc_fid, secure_origin, x1, x2,
						x3, x4, cookie, handle, flags);

	default:
		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
		break;
	}
	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
}
2572
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the SPMC. It
 * validates the interrupt and upon success arranges entry into the SP for
 * handling the interrupt.
 *
 * id:     interrupt ID (unused; the SP determines the pending interrupt).
 * flags:  interrupt flags carrying the originating security state.
 * handle: pointer to the interrupted context, forwarded to the SPMD.
 * cookie: unused opaque parameter from the interrupt framework.
 ******************************************************************************/
static uint64_t spmc_sp_interrupt_handler(uint32_t id,
					  uint32_t flags,
					  void *handle,
					  void *cookie)
{
	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
	struct sp_exec_ctx *ec;
	uint32_t linear_id = plat_my_core_pos();

	/* Sanity check for a NULL pointer dereference. */
	assert(sp != NULL);

	/* Check the security state when the exception was generated. */
	assert(get_interrupt_src_ss(flags) == NON_SECURE);

	/* Panic if not an S-EL1 Partition. */
	if (sp->runtime_el != S_EL1) {
		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
		      linear_id);
		panic();
	}

	/* Obtain a reference to the SP execution context. */
	ec = spmc_get_sp_ec(sp);

	/* Ensure that the execution context is in waiting state else panic. */
	if (ec->rt_state != RT_STATE_WAITING) {
		ERROR("SP EC on core%u is not waiting (%u), it is (%u).\n",
		      linear_id, RT_STATE_WAITING, ec->rt_state);
		panic();
	}

	/* Update the runtime model and state of the partition. */
	ec->rt_model = RT_MODEL_INTR;
	ec->rt_state = RT_STATE_RUNNING;

	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);

	/*
	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
	 * populated as the SP can determine this by itself.
	 * The flags field is forced to 0 mainly to pass the SVE hint bit
	 * cleared for consumption by the lower EL.
	 */
	return spmd_smc_switch_state(FFA_INTERRUPT, false,
				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
				     handle, 0ULL);
}
2627