/*
 * Copyright (c) 2019-2020 LK Trusty Authors. All Rights Reserved.
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define LOCAL_TRACE 0

#include <assert.h>
#include <endian.h>
#include <err.h>
#include <interface/arm_ffa/arm_ffa.h>
#include <inttypes.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <kernel/vm.h>
#include <lib/arm_ffa/arm_ffa.h>
#include <lib/binary_search_tree.h>
#include <lib/smc/smc.h>
#include <lib/trusty/uuid.h>
#include <lk/init.h>
#include <lk/macros.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>

static enum arm_ffa_init_state ffa_init_state = ARM_FFA_INIT_UNINIT;
static uint32_t ffa_version;
static uint16_t ffa_local_id;
static size_t ffa_buf_size;
static void* ffa_tx;
static void* ffa_rx;
static bool supports_ns_bit = false;
static bool supports_rx_release = false;
static bool console_log_is_unsupported;
static mutex_t ffa_rxtx_buffer_lock = MUTEX_INITIAL_VALUE(ffa_rxtx_buffer_lock);
#if ARCH_ARM64
static bool send_direct_req2_is_unsupported;

static struct bst_root arm_ffa_direct_req2_handler_tree =
        BST_ROOT_INITIAL_VALUE;
static spin_lock_t arm_ffa_direct_req2_tree_lock = SPIN_LOCK_INITIAL_VALUE;

/**
 * struct arm_ffa_direct_req2_bst_obj - Binary search tree object for
 *                                      ffa_direct_req2 handler
 * @bst_node: BST node
 * @uuid_lo_hi: Array that holds the UUID as two 64-bit words.
 *              uuid_lo_hi[0] is what the FFA spec labels "Lo" - bytes [0-7].
 *              uuid_lo_hi[1] is what the FFA spec labels "Hi" - bytes [8-15].
 * @handler: Pointer to FFA_DIRECT_REQ2 handler function
 */
struct arm_ffa_direct_req2_bst_obj {
    struct bst_node bst_node;
    uint64_t uuid_lo_hi[2];
    arm_ffa_direct_req2_handler_t handler;
};
static int arm_ffa_direct_req2_handler_compare(struct bst_node* a,
                                               struct bst_node* b);
#endif

/**
 * uuid_to_le64_pair() - convert uuid_t to (lo, hi)-pair per FFA spec.
 *
 * @uuid_lo_hi: Must be an array large enough to store a pair of 64-bit values.
 * These output elements are little-endian encoded. Upon function return,
 * uuid_lo_hi[0] contains what the FFA spec labels "Lo" - bytes [0-7], and
 * uuid_lo_hi[1] contains what the FFA spec labels "Hi" - bytes [8-15].
 */
static inline void uuid_to_le64_pair(uuid_t uuid_obj,
                                     uint64_t uuid_lo_hi[static 2]) {
    uuid_lo_hi[0] = (((uint64_t)__bswap16(uuid_obj.time_hi_and_version) << 48) |
                     ((uint64_t)__bswap16(uuid_obj.time_mid) << 32) |
                     ((uint64_t)__bswap32(uuid_obj.time_low)));

    /* Start from zero; callers may pass an uninitialized array. */
    uuid_lo_hi[1] = 0;
    for (int i = 0; i < 8; i++) {
        uuid_lo_hi[1] |= ((uint64_t)uuid_obj.clock_seq_and_node[i]) << (i * 8);
    }
}
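
/*
 * Illustrative example (editorial note, not from the original source): for the
 * UUID 01234567-89ab-cdef-0011-223344556677, uuid_t holds
 * time_low = 0x01234567, time_mid = 0x89ab, time_hi_and_version = 0xcdef and
 * clock_seq_and_node = {0x00, 0x11, ..., 0x77}. uuid_to_le64_pair() then
 * produces
 *     uuid_lo_hi[0] = 0xefcdab8967452301 ("Lo", canonical bytes [0-7]), and
 *     uuid_lo_hi[1] = 0x7766554433221100 ("Hi", canonical bytes [8-15]),
 * i.e. the little-endian packing that FFA_MSG_SEND_DIRECT_REQ2 expects in
 * registers x2/x3 (see arm_ffa_msg_send_direct_req2() below).
 */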

#if ARCH_ARM64
status_t arm_ffa_register_direct_req2_handler(
        uuid_t uuid,
        arm_ffa_direct_req2_handler_t handler) {
    struct arm_ffa_direct_req2_bst_obj* obj;

    obj = calloc(1, sizeof(*obj));
    if (!obj) {
        LTRACEF("ERROR: not enough memory for direct_req2 handler\n");
        return ERR_NO_MEMORY;
    }

    uuid_to_le64_pair(uuid, obj->uuid_lo_hi);
    obj->handler = handler;

    spin_lock(&arm_ffa_direct_req2_tree_lock);
    if (!bst_insert(&arm_ffa_direct_req2_handler_tree, &obj->bst_node,
                    arm_ffa_direct_req2_handler_compare)) {
        spin_unlock(&arm_ffa_direct_req2_tree_lock);
        free(obj);
        LTRACEF("ERROR: couldn't insert direct_req2 handler into BST\n");
        return ERR_ALREADY_EXISTS;
    } else {
        spin_unlock(&arm_ffa_direct_req2_tree_lock);
        return 0;
    }
}
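
/*
 * Usage sketch (hypothetical, not from the original source): a service that
 * wants to receive FFA_MSG_SEND_DIRECT_REQ2 messages for its service UUID
 * might register a handler once at init:
 *
 *     static status_t my_req2_handler(uint16_t sender_id, ulong* args) {
 *         // args points at the extended message registers (x4 onwards).
 *         return NO_ERROR;
 *     }
 *     ...
 *     status_t res = arm_ffa_register_direct_req2_handler(my_service_uuid,
 *                                                         my_req2_handler);
 *
 * my_service_uuid and my_req2_handler are placeholders; the authoritative
 * handler signature is arm_ffa_direct_req2_handler_t, invoked by
 * arm_ffa_handle_direct_req2() below with &regs->r4.
 */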

static int arm_ffa_direct_req2_handler_compare(struct bst_node* a,
                                               struct bst_node* b) {
    struct arm_ffa_direct_req2_bst_obj* obj_a =
            containerof(a, struct arm_ffa_direct_req2_bst_obj, bst_node);
    struct arm_ffa_direct_req2_bst_obj* obj_b =
            containerof(b, struct arm_ffa_direct_req2_bst_obj, bst_node);

    return memcmp(obj_a->uuid_lo_hi, obj_b->uuid_lo_hi,
                  sizeof(obj_a->uuid_lo_hi));
}

status_t arm_ffa_handle_direct_req2(struct smc_ret18* regs) {
    struct arm_ffa_direct_req2_bst_obj search_obj;
    struct arm_ffa_direct_req2_bst_obj* found_obj;
    uint16_t sender_id = (regs->r1 >> 16) & 0xffff;
    search_obj.uuid_lo_hi[0] = regs->r2;
    search_obj.uuid_lo_hi[1] = regs->r3;

    spin_lock(&arm_ffa_direct_req2_tree_lock);
    found_obj = bst_search_type(&arm_ffa_direct_req2_handler_tree, &search_obj,
                                arm_ffa_direct_req2_handler_compare,
                                struct arm_ffa_direct_req2_bst_obj, bst_node);
    spin_unlock(&arm_ffa_direct_req2_tree_lock);

    if (found_obj) {
        return found_obj->handler(sender_id, &regs->r4);
    } else {
        LTRACEF("Error: No handler for UUID 0x%016lx 0x%016lx for sender %d\n",
                regs->r2, regs->r3, sender_id);
        return ERR_NOT_FOUND;
    }
}
#endif

enum arm_ffa_init_state arm_ffa_init_state(void) {
    return ffa_init_state;
}

static status_t arm_ffa_call_id_get(uint16_t* id) {
    struct smc_ret8 smc_ret;

    smc_ret = smc8(SMC_FC_FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);

    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        if (smc_ret.r2 & ~0xFFFFUL) {
            TRACEF("Unexpected FFA_ID_GET result: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
        *id = (uint16_t)(smc_ret.r2 & 0xFFFF);
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        if ((int32_t)smc_ret.r2 == FFA_ERROR_NOT_SUPPORTED) {
            return ERR_NOT_SUPPORTED;
        } else {
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }

    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_version(uint16_t major,
                                     uint16_t minor,
                                     uint16_t* major_ret,
                                     uint16_t* minor_ret) {
    struct smc_ret8 smc_ret;

    uint32_t version = FFA_VERSION(major, minor);
    /* Bit 31 must be cleared. */
    ASSERT(!(version >> 31));
    smc_ret = smc8(SMC_FC_FFA_VERSION, version, 0, 0, 0, 0, 0, 0);
    if ((int32_t)smc_ret.r0 == FFA_ERROR_NOT_SUPPORTED) {
        return ERR_NOT_SUPPORTED;
    }
    *major_ret = FFA_VERSION_TO_MAJOR(smc_ret.r0);
    *minor_ret = FFA_VERSION_TO_MINOR(smc_ret.r0);

    return NO_ERROR;
}
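
/*
 * Note (editorial, assuming the standard FF-A version encoding): FFA_VERSION
 * packs the version as (major << 16) | minor with bit 31 zero, so e.g.
 * FFA_VERSION(1, 1) == 0x00010001. The ASSERT above enforces the MBZ bit 31
 * before the value is passed to the SPMC.
 */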

/* TODO: When adding support for FFA version 1.1, feature ids should be added. */
static status_t arm_ffa_call_features(ulong id,
                                      bool* is_implemented,
                                      ffa_features2_t* features2,
                                      ffa_features3_t* features3) {
    struct smc_ret8 smc_ret;

    ASSERT(is_implemented);

    /*
     * According to the FF-A spec section "Discovery of NS bit usage",
     * NS_BIT is optionally set by a v1.0 SP such as Trusty, and must
     * be set by a v1.1+ SP. Here, we set it unconditionally for the
     * relevant feature.
     */
    bool request_ns_bit = (id == SMC_FC_FFA_MEM_RETRIEVE_REQ) ||
                          (id == SMC_FC64_FFA_MEM_RETRIEVE_REQ);
    smc_ret = smc8(SMC_FC_FFA_FEATURES, id,
                   request_ns_bit ? FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT : 0,
                   0, 0, 0, 0, 0);

    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        *is_implemented = true;
        if (features2) {
            *features2 = (ffa_features2_t)smc_ret.r2;
        }
        if (features3) {
            *features3 = (ffa_features3_t)smc_ret.r3;
        }
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        if ((int32_t)smc_ret.r2 == FFA_ERROR_NOT_SUPPORTED) {
            *is_implemented = false;
            return NO_ERROR;
        } else {
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }

    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

/*
 * Call with ffa_rxtx_buffer_lock acquired and the ffa_tx buffer already
 * populated with struct ffa_mtd_common. Transmit in a single fragment.
 */
static status_t arm_ffa_call_mem_retrieve_req(uint32_t* total_len,
                                              uint32_t* fragment_len) {
    struct smc_ret8 smc_ret;
    struct ffa_mtd_v1_0* req_v1_0 = ffa_tx;
    struct ffa_mtd_v1_1* req_v1_1 = ffa_tx;
    size_t len;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (ffa_version < FFA_VERSION(1, 1)) {
        len = offsetof(struct ffa_mtd_v1_0, emad[0]) +
              req_v1_0->emad_count * sizeof(struct ffa_emad);
    } else {
        len = req_v1_1->emad_offset +
              req_v1_1->emad_count * req_v1_1->emad_size;
    }

    smc_ret = smc8(SMC_FC_FFA_MEM_RETRIEVE_REQ, len, len, 0, 0, 0, 0, 0);

    int32_t error;
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_MEM_RETRIEVE_RESP:
        if (total_len) {
            *total_len = (uint32_t)smc_ret.r1;
        }
        if (fragment_len) {
            *fragment_len = (uint32_t)smc_ret.r2;
        }
        return NO_ERROR;
    case SMC_FC_FFA_ERROR:
        error = (int32_t)smc_ret.r2;
        switch (error) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unknown error: 0x%x\n", error);
            return ERR_NOT_VALID;
        }
    default:
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_mem_frag_rx(uint64_t handle,
                                         uint32_t offset,
                                         uint32_t* fragment_len) {
    struct smc_ret8 smc_ret;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    smc_ret = smc8(SMC_FC_FFA_MEM_FRAG_RX, (uint32_t)handle, handle >> 32,
                   offset, 0, 0, 0, 0);

    /* FRAG_RX is followed by FRAG_TX on successful completion. */
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_MEM_FRAG_TX: {
        uint64_t handle_out =
                (uint32_t)smc_ret.r1 | ((uint64_t)(uint32_t)smc_ret.r2 << 32);
        if (handle != handle_out) {
            TRACEF("Handle for response doesn't match the request, %" PRId64
                   " != %" PRId64,
                   handle, handle_out);
            return ERR_NOT_VALID;
        }
        *fragment_len = smc_ret.r3;
        return NO_ERROR;
    }
    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unexpected error %d\n", (int32_t)smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected function id returned 0x%08lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_mem_share(size_t num_comp_mrd,
                                       size_t num_cons_mrd,
                                       uint32_t* total_len,
                                       uint32_t* fragment_len,
                                       uint64_t* handle) {
    struct smc_ret8 smc_ret;
    struct ffa_mtd_v1_0* req_v1_0 = ffa_tx;
    struct ffa_mtd_v1_1* req_v1_1 = ffa_tx;
    size_t len;
    int32_t error;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (ffa_version < FFA_VERSION(1, 1)) {
        len = offsetof(struct ffa_mtd_v1_0, emad[0]) +
              (req_v1_0->emad_count * sizeof(struct ffa_emad)) +
              (num_comp_mrd * sizeof(struct ffa_comp_mrd)) +
              (num_cons_mrd * sizeof(struct ffa_cons_mrd));
    } else {
        len = req_v1_1->emad_offset +
              (req_v1_1->emad_count * req_v1_1->emad_size) +
              (num_comp_mrd * sizeof(struct ffa_comp_mrd)) +
              (num_cons_mrd * sizeof(struct ffa_cons_mrd));
    }

    /* w3 and w4 MBZ since tx buffer is used, the rest SBZ */
    smc_ret = smc8(SMC_FC64_FFA_MEM_SHARE, len, len, 0, 0, 0, 0, 0);
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
        if (total_len) {
            *total_len = (uint32_t)smc_ret.r1;
        }
        if (fragment_len) {
            *fragment_len = (uint32_t)smc_ret.r2;
        }
        if (handle) {
            *handle = (uint32_t)smc_ret.r2;
            *handle |= ((uint64_t)smc_ret.r3) << 32;
        }
        return NO_ERROR;
    case SMC_FC_FFA_ERROR:
        error = (int32_t)smc_ret.r2;
        switch (error) {
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_BUSY:
            return ERR_BUSY;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unexpected error: 0x%x\n", error);
            return ERR_NOT_VALID;
        }
    default:
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_mem_relinquish(
        uint64_t handle,
        uint32_t flags,
        uint32_t endpoint_count,
        const ffa_endpoint_id16_t* endpoints) {
    struct smc_ret8 smc_ret;
    struct ffa_mem_relinquish_descriptor* req = ffa_tx;

    if (!req) {
        TRACEF("ERROR: no FF-A tx buffer\n");
        return ERR_NOT_CONFIGURED;
    }
    ASSERT(endpoint_count <=
           (ffa_buf_size - sizeof(struct ffa_mem_relinquish_descriptor)) /
                   sizeof(ffa_endpoint_id16_t));

    mutex_acquire(&ffa_rxtx_buffer_lock);

    req->handle = handle;
    req->flags = flags;
    req->endpoint_count = endpoint_count;

    memcpy(req->endpoint_array, endpoints,
           endpoint_count * sizeof(ffa_endpoint_id16_t));

    smc_ret = smc8(SMC_FC_FFA_MEM_RELINQUISH, 0, 0, 0, 0, 0, 0, 0);

    mutex_release(&ffa_rxtx_buffer_lock);

    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_rxtx_map(paddr_t tx_paddr,
                                      paddr_t rx_paddr,
                                      size_t page_count) {
    struct smc_ret8 smc_ret;

    /* Page count is specified in bits [5:0]. */
    ASSERT(page_count);
    ASSERT(page_count < (1 << 6));

#if ARCH_ARM64
    smc_ret = smc8(SMC_FC64_FFA_RXTX_MAP, tx_paddr, rx_paddr, page_count, 0, 0,
                   0, 0);
#else
    smc_ret = smc8(SMC_FC_FFA_RXTX_MAP, tx_paddr, rx_paddr, page_count, 0, 0, 0,
                   0);
#endif
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_ALREADY_EXISTS;
        default:
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

static status_t arm_ffa_call_rx_release(void) {
    struct smc_ret8 smc_ret;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    smc_ret = smc8(SMC_FC_FFA_RX_RELEASE, 0, 0, 0, 0, 0, 0, 0);
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        default:
            return ERR_NOT_VALID;
        }
    default:
        return ERR_NOT_VALID;
    }
}

#if WITH_SMP
static status_t ffa_call_secondary_ep_register(void) {
    struct smc_ret8 smc_ret;
    paddr_t secondary_ep_paddr;
    extern char _start[];

    secondary_ep_paddr = vaddr_to_paddr(_start);

    smc_ret = smc8(SMC_FC64_FFA_SECONDARY_EP_REGISTER, secondary_ep_paddr, 0, 0,
                   0, 0, 0, 0);
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        default:
            return ERR_NOT_VALID;
        }

    case SMC_UNKNOWN:
        return ERR_NOT_SUPPORTED;

    default:
        return ERR_NOT_VALID;
    }
}
#endif /* WITH_SMP */

struct smc_ret18 arm_ffa_call_error(enum ffa_error err) {
    long target = 0; /* Target must be zero (MBZ) at secure FF-A instances */
    return smc8_ret18(SMC_FC_FFA_ERROR, target, (ulong)err, 0, 0, 0, 0, 0);
}

struct smc_ret18 arm_ffa_call_msg_wait(void) {
    return smc8_ret18(SMC_FC_FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);
}

struct smc_ret18 arm_ffa_msg_send_direct_resp(
        const struct smc_ret18* direct_req_regs,
        ulong a0,
        ulong a1,
        ulong a2,
        ulong a3,
        ulong a4) {
    uint32_t sender_receiver_id;
    uint32_t flags;

    DEBUG_ASSERT(direct_req_regs);

    /* Copy and flip the sender from the direct message request */
    sender_receiver_id = ((uint32_t)direct_req_regs->r1 >> 16) |
                         ((uint32_t)ffa_local_id << 16);
    /* Copy the flags as well */
    flags = direct_req_regs->r2;

    switch ((uint32_t)direct_req_regs->r0) {
    case SMC_FC_FFA_MSG_SEND_DIRECT_REQ:
        return smc8_ret18(SMC_FC_FFA_MSG_SEND_DIRECT_RESP, sender_receiver_id,
                          flags, a0, a1, a2, a3, a4);
    case SMC_FC64_FFA_MSG_SEND_DIRECT_REQ:
        return smc8_ret18(SMC_FC64_FFA_MSG_SEND_DIRECT_RESP, sender_receiver_id,
                          flags, a0, a1, a2, a3, a4);
    default:
        dprintf(CRITICAL, "Invalid direct request function id %lx\n",
                direct_req_regs->r0);
        return arm_ffa_call_error(FFA_ERROR_INVALID_PARAMETERS);
    }

    __UNREACHABLE;
}
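
/*
 * Illustrative example (values are hypothetical): if a direct request arrives
 * with w1 = 0x80038000, the sender is endpoint 0x8003 (bits [31:16]) and the
 * receiver is this partition, 0x8000 (bits [15:0]). Assuming ffa_local_id is
 * 0x8000, the computation above produces w1 = 0x80008003 for the response,
 * i.e. the same pair with the roles swapped.
 */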

struct smc_ret18 arm_ffa_msg_send_direct_resp2(
        const struct smc_ret18* direct_req_regs,
        uint64_t args[static ARM_FFA_MSG_EXTENDED_ARGS_COUNT]) {
    uint32_t sender_receiver_id;

    DEBUG_ASSERT(direct_req_regs);
    DEBUG_ASSERT(args);
    if ((uint32_t)direct_req_regs->r0 != SMC_FC64_FFA_MSG_SEND_DIRECT_REQ2) {
        dprintf(CRITICAL, "Invalid direct request function id %x\n",
                (uint32_t)direct_req_regs->r0);
        return arm_ffa_call_error(FFA_ERROR_INVALID_PARAMETERS);
    }

    /* Copy and flip the sender from the direct message request */
    sender_receiver_id =
            (direct_req_regs->r1 >> 16) | ((uint32_t)ffa_local_id << 16);

    return smc18(SMC_FC64_FFA_MSG_SEND_DIRECT_RESP2, sender_receiver_id, 0, 0,
                 args[0], args[1], args[2], args[3], args[4], args[5], args[6],
                 args[7], args[8], args[9], args[10], args[11], args[12],
                 args[13]);
}

#if ARCH_ARM64
status_t arm_ffa_msg_send_direct_req2(
        uuid_t uuid,
        uint16_t receiver_id,
        uint64_t args[static ARM_FFA_MSG_EXTENDED_ARGS_COUNT],
        struct smc_ret18* resp) {
    struct smc_ret18 smc_ret;
    uint64_t uuid_lo_hi[2];
    uint32_t fid = SMC_FC64_FFA_MSG_SEND_DIRECT_REQ2;
    uint32_t sender_receiver_id = ((uint32_t)ffa_local_id << 16) | receiver_id;

    if (send_direct_req2_is_unsupported) {
        return ERR_NOT_SUPPORTED;
    }

    if (!args || !resp) {
        return ERR_INVALID_ARGS;
    }

    uuid_to_le64_pair(uuid, uuid_lo_hi);

    smc_ret = smc18(fid, sender_receiver_id, uuid_lo_hi[0], uuid_lo_hi[1],
                    args[0], args[1], args[2], args[3], args[4], args[5],
                    args[6], args[7], args[8], args[9], args[10], args[11],
                    args[12], args[13]);

    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC64_FFA_MSG_SEND_DIRECT_RESP2:
        *resp = smc_ret;
        return NO_ERROR;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            send_direct_req2_is_unsupported = true;
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            dprintf(CRITICAL, "Invalid parameters for direct request2\n");
            return ERR_INVALID_ARGS;
        default:
            return ERR_NOT_VALID;
        }

    case SMC_UNKNOWN:
        send_direct_req2_is_unsupported = true;
        return ERR_NOT_SUPPORTED;

    case SMC_FC_FFA_INTERRUPT:
        /*
         * SMC_FC_FFA_INTERRUPT or SMC_FC_FFA_YIELD can be returned per the FF-A
         * spec but it shouldn't happen when Trusty is the receiver of requests.
         */
        panic("Received SMC_FC_FFA_INTERRUPT in response to direct request2");

    case SMC_FC_FFA_YIELD:
        /* See previous case */
        panic("Received SMC_FC_FFA_YIELD in response to direct request2");

    default:
        dprintf(CRITICAL, "Unexpected response (%x) to direct request2\n",
                (uint32_t)smc_ret.r0);
        return ERR_NOT_VALID;
    }
}
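
/*
 * Usage sketch (hypothetical names, not from the original source): sending an
 * extended direct request to a peer endpoint and inspecting the response:
 *
 *     uint64_t args[ARM_FFA_MSG_EXTENDED_ARGS_COUNT] = {0};
 *     struct smc_ret18 resp;
 *     args[0] = MY_PROTOCOL_CMD;    // placeholder payload, lands in x4
 *     status_t res = arm_ffa_msg_send_direct_req2(peer_service_uuid, peer_id,
 *                                                 args, &resp);
 *     if (res == NO_ERROR) {
 *         // resp.r4 onwards hold the peer's extended response registers.
 *     }
 *
 * MY_PROTOCOL_CMD, peer_service_uuid and peer_id are placeholders. Note that
 * the first ERR_NOT_SUPPORTED latches send_direct_req2_is_unsupported, so
 * later calls fail fast without issuing an SMC.
 */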
#endif

ssize_t arm_ffa_console_log(const char* buf, size_t len) {
    struct smc_ret8 smc_ret;

    if (console_log_is_unsupported) {
        return ERR_NOT_SUPPORTED;
    }
    if (!len) {
        /* Nothing to print, just return */
        return 0;
    }
    if (len != 1) {
        /* TODO: support more than one character */
        len = 1;
    }

    smc_ret = smc8(SMC_FC_FFA_CONSOLE_LOG, len, (ulong)buf[0], 0, 0, 0, 0, 0);
    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
    case SMC_FC64_FFA_SUCCESS:
        return len;

    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_NOT_SUPPORTED:
            console_log_is_unsupported = true;
            return ERR_NOT_SUPPORTED;
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_RETRY:
            /* On FFA_ERROR_RETRY, w3 reports how many characters were printed */
            return (uint32_t)smc_ret.r3;
        default:
            return ERR_NOT_VALID;
        }

    case SMC_UNKNOWN:
        console_log_is_unsupported = true;
        return ERR_NOT_SUPPORTED;

    default:
        return ERR_NOT_VALID;
    }
}
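
/*
 * Since this wrapper currently transmits a single character per call (see the
 * TODO above), a caller that wants to emit a whole string could loop over it;
 * a minimal sketch, assuming it is acceptable to stop on the first error:
 *
 *     static void console_log_puts(const char* s, size_t len) {
 *         for (size_t i = 0; i < len; i++) {
 *             if (arm_ffa_console_log(&s[i], 1) < 0) {
 *                 break;  // e.g. ERR_NOT_SUPPORTED; stop trying
 *             }
 *         }
 *     }
 *
 * console_log_puts is a hypothetical helper, not part of this driver.
 */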

static status_t arm_ffa_rx_release_is_implemented(bool* is_implemented) {
    bool is_implemented_val;
    status_t res = arm_ffa_call_features(SMC_FC_FFA_RX_RELEASE,
                                         &is_implemented_val, NULL, NULL);
    if (res != NO_ERROR) {
        TRACEF("Failed to query for feature FFA_RX_RELEASE, err = %d\n", res);
        return res;
    }
    if (is_implemented) {
        *is_implemented = is_implemented_val;
    }
    return NO_ERROR;
}

static status_t arm_ffa_rxtx_map_is_implemented(bool* is_implemented,
                                                size_t* buf_size_log2) {
    ffa_features2_t features2;
    bool is_implemented_val = false;
    status_t res;

    ASSERT(is_implemented);
#if ARCH_ARM64
    res = arm_ffa_call_features(SMC_FC64_FFA_RXTX_MAP, &is_implemented_val,
                                &features2, NULL);
#else
    res = arm_ffa_call_features(SMC_FC_FFA_RXTX_MAP, &is_implemented_val,
                                &features2, NULL);
#endif
    if (res != NO_ERROR) {
        TRACEF("Failed to query for feature FFA_RXTX_MAP, err = %d\n", res);
        return res;
    }
    if (!is_implemented_val) {
        *is_implemented = false;
        return NO_ERROR;
    }
    if (buf_size_log2) {
        ulong buf_size_id = features2 & FFA_FEATURES2_RXTX_MAP_BUF_SIZE_MASK;
        switch (buf_size_id) {
        case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_4K:
            *buf_size_log2 = 12;
            break;
        case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_16K:
            *buf_size_log2 = 14;
            break;
        case FFA_FEATURES2_RXTX_MAP_BUF_SIZE_64K:
            *buf_size_log2 = 16;
            break;
        default:
            TRACEF("Unexpected rxtx buffer size identifier: %lx\n",
                   buf_size_id);
            return ERR_NOT_VALID;
        }
    }

    *is_implemented = true;
    return NO_ERROR;
}

static status_t arm_ffa_mem_retrieve_req_is_implemented(
        bool* is_implemented,
        bool* dyn_alloc_supp,
        bool* has_ns_bit,
        size_t* ref_count_num_bits) {
    ffa_features2_t features2;
    ffa_features3_t features3;
    bool is_implemented_val = false;
    status_t res;

    ASSERT(is_implemented);

    res = arm_ffa_call_features(SMC_FC_FFA_MEM_RETRIEVE_REQ,
                                &is_implemented_val, &features2, &features3);
    if (res != NO_ERROR) {
        TRACEF("Failed to query for feature FFA_MEM_RETRIEVE_REQ, err = %d\n",
               res);
        return res;
    }
    if (!is_implemented_val) {
        *is_implemented = false;
        return NO_ERROR;
    }
    if (dyn_alloc_supp) {
        *dyn_alloc_supp = !!(features2 & FFA_FEATURES2_MEM_DYNAMIC_BUFFER);
    }
    if (has_ns_bit) {
        *has_ns_bit = !!(features2 & FFA_FEATURES2_MEM_RETRIEVE_REQ_NS_BIT);
    }
    if (ref_count_num_bits) {
        *ref_count_num_bits =
                (features3 & FFA_FEATURES3_MEM_RETRIEVE_REQ_REFCOUNT_MASK) + 1;
    }
    *is_implemented = true;
    return NO_ERROR;
}

/*
 * Helper function to set up the tx buffer with standard values
 * before calling FFA_MEM_RETRIEVE_REQ.
 */
static void arm_ffa_populate_receive_req_tx_buffer(uint16_t sender_id,
                                                   uint64_t handle,
                                                   uint64_t tag) {
    struct ffa_mtd_v1_0* req_v1_0 = ffa_tx;
    struct ffa_mtd_v1_1* req_v1_1 = ffa_tx;
    struct ffa_mtd_common* req = ffa_tx;
    struct ffa_emad* emad;
    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (ffa_version < FFA_VERSION(1, 1)) {
        memset(req_v1_0, 0, sizeof(struct ffa_mtd_v1_0));
    } else {
        memset(req_v1_1, 0, sizeof(struct ffa_mtd_v1_1));
    }

    req->sender_id = sender_id;
    req->handle = handle;
    /* We must use the same tag as the one used by the sender to retrieve. */
    req->tag = tag;

    if (ffa_version < FFA_VERSION(1, 1)) {
        /*
         * We only support retrieving memory for ourselves for now.
         * TODO: Also support stream endpoints. Possibly more than one.
         */
        req_v1_0->emad_count = 1;
        emad = req_v1_0->emad;
    } else {
        req_v1_1->emad_count = 1;
        req_v1_1->emad_size = sizeof(struct ffa_emad);
        req_v1_1->emad_offset = sizeof(struct ffa_mtd_v1_1);
        emad = (struct ffa_emad*)((uint8_t*)req_v1_1 + req_v1_1->emad_offset);
    }

    memset(emad, 0, sizeof(struct ffa_emad));
    emad[0].mapd.endpoint_id = ffa_local_id;
}
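
/*
 * For reference (editorial note, derived from the code above): for FF-A v1.1+
 * the retrieve request staged in the tx buffer is laid out as
 *
 *     [struct ffa_mtd_v1_1][struct ffa_emad]
 *                          ^ emad_offset = sizeof(struct ffa_mtd_v1_1)
 *
 * with emad_count = 1, while for v1.0 the single emad is embedded in
 * struct ffa_mtd_v1_0 itself. arm_ffa_call_mem_retrieve_req() computes the
 * transmitted length from these same fields.
 */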

static void arm_ffa_populate_share_tx_buffer(uint16_t receiver_id,
                                             paddr_t buffer,
                                             size_t num_ffa_pages,
                                             uint arch_mmu_flags,
                                             uint64_t tag) {
    struct ffa_mtd_v1_0* req_v1_0 = ffa_tx;
    struct ffa_mtd_v1_1* req_v1_1 = ffa_tx;
    struct ffa_mtd_common* req = ffa_tx;
    struct ffa_emad* emad;
    ffa_mem_attr8_t attributes = 0;
    ffa_mem_perm8_t permissions = 0;
    uint32_t comp_mrd_offset = 0;
    struct ffa_comp_mrd* comp_mrd;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (ffa_version < FFA_VERSION(1, 1)) {
        memset(req_v1_0, 0, sizeof(struct ffa_mtd_v1_0));
    } else {
        memset(req_v1_1, 0, sizeof(struct ffa_mtd_v1_1));
    }

    req->sender_id = ffa_local_id;

    switch (arch_mmu_flags & ARCH_MMU_FLAG_CACHE_MASK) {
    case ARCH_MMU_FLAG_UNCACHED_DEVICE:
        attributes |= FFA_MEM_ATTR_DEVICE_NGNRE;
        break;
    case ARCH_MMU_FLAG_UNCACHED:
        attributes |= FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED;
        break;
    case ARCH_MMU_FLAG_CACHED:
        attributes |= FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB |
                      FFA_MEM_ATTR_INNER_SHAREABLE;
        break;
    }

    req->memory_region_attributes = attributes;
    req->flags = FFA_MTD_FLAG_TYPE_SHARE_MEMORY;
    /* We must use the same tag as the one used by the receiver to retrieve. */
    req->tag = tag;
    /* MBZ for MEM_SHARE */
    req->handle = 0;

    if (ffa_version < FFA_VERSION(1, 1)) {
        /*
         * We only support sharing memory from ourselves for now.
         * TODO: Also support stream endpoints. Possibly more than one.
         */
        req_v1_0->emad_count = 1;
        emad = req_v1_0->emad;
    } else {
        req_v1_1->emad_count = 1;
        req_v1_1->emad_size = sizeof(struct ffa_emad);
        req_v1_1->emad_offset = sizeof(struct ffa_mtd_v1_1);
        emad = (struct ffa_emad*)((uint8_t*)req_v1_1 + req_v1_1->emad_offset);
    }

    memset(emad, 0, sizeof(struct ffa_emad));
    emad[0].mapd.endpoint_id = receiver_id;
    permissions = FFA_MEM_PERM_NX;
    if (arch_mmu_flags & ARCH_MMU_FLAG_PERM_RO) {
        permissions |= FFA_MEM_PERM_RO;
    } else {
        permissions |= FFA_MEM_PERM_RW;
    }
    emad[0].mapd.memory_access_permissions = permissions;
    if (ffa_version < FFA_VERSION(1, 1)) {
        /* We only support one emad */
        comp_mrd_offset = sizeof(struct ffa_mtd_v1_0) + sizeof(struct ffa_emad);
    } else {
        comp_mrd_offset = sizeof(struct ffa_mtd_v1_1) + sizeof(struct ffa_emad);
    }
    emad[0].comp_mrd_offset = comp_mrd_offset;

    comp_mrd = (struct ffa_comp_mrd*)((uint8_t*)emad + sizeof(struct ffa_emad));
    comp_mrd->total_page_count = num_ffa_pages;
    comp_mrd->address_range_count = 1;
    comp_mrd->address_range_array[0].address = buffer;
    comp_mrd->address_range_array[0].page_count = num_ffa_pages;
}
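
/*
 * For reference (editorial note, derived from the code above): the share
 * descriptor built here for FF-A v1.1+ occupies the tx buffer as
 *
 *     [struct ffa_mtd_v1_1][struct ffa_emad][struct ffa_comp_mrd + 1 range]
 *                                           ^ emad[0].comp_mrd_offset
 *
 * i.e. one endpoint descriptor and one composite range covering num_ffa_pages
 * FFA_PAGE_SIZE pages starting at `buffer`. arm_ffa_call_mem_share() derives
 * the same total length from emad_offset/emad_count/emad_size plus the
 * composite and constituent descriptor counts passed by the caller.
 */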

/*
 * Send the FFA_MEM_RETRIEVE_REQ that was staged in the tx buffer and report
 * the total and first-fragment lengths of the response placed in the rx
 * buffer. Populate the tx buffer (and take ffa_rxtx_buffer_lock) before
 * calling.
 */
static status_t arm_ffa_mem_retrieve(uint16_t sender_id,
                                     uint64_t handle,
                                     uint32_t* len,
                                     uint32_t* fragment_len) {
    status_t res = NO_ERROR;

    DEBUG_ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));
    DEBUG_ASSERT(len);

    uint32_t len_out, fragment_len_out;
    res = arm_ffa_call_mem_retrieve_req(&len_out, &fragment_len_out);
    LTRACEF("total_len: %u, fragment_len: %u\n", len_out, fragment_len_out);
    if (res != NO_ERROR) {
        TRACEF("FF-A memory retrieve request failed, err = %d\n", res);
        return res;
    }
    if (fragment_len_out > len_out) {
        TRACEF("Fragment length larger than total length %u > %u\n",
               fragment_len_out, len_out);
        return ERR_IO;
    }

    /* Check that the first fragment fits in our buffer */
    if (fragment_len_out > ffa_buf_size) {
        TRACEF("Fragment length %u larger than buffer size\n",
               fragment_len_out);
        return ERR_IO;
    }

    if (fragment_len) {
        *fragment_len = fragment_len_out;
    }
    if (len) {
        *len = len_out;
    }

    return NO_ERROR;
}

status_t arm_ffa_mem_address_range_get(struct arm_ffa_mem_frag_info* frag_info,
                                       size_t index,
                                       paddr_t* addr,
                                       size_t* size) {
    uint32_t page_count;
    size_t frag_idx;

    DEBUG_ASSERT(frag_info);

    if (index < frag_info->start_index ||
        index >= frag_info->start_index + frag_info->count) {
        return ERR_OUT_OF_RANGE;
    }

    frag_idx = index - frag_info->start_index;

    page_count = frag_info->address_ranges[frag_idx].page_count;
    LTRACEF("address %p, page_count 0x%x\n",
            (void*)frag_info->address_ranges[frag_idx].address,
            frag_info->address_ranges[frag_idx].page_count);
    if (page_count < 1 || ((size_t)page_count > (SIZE_MAX / FFA_PAGE_SIZE))) {
        TRACEF("bad page count 0x%x at %zu\n", page_count, index);
        return ERR_IO;
    }

    if (addr) {
        *addr = (paddr_t)frag_info->address_ranges[frag_idx].address;
    }
    if (size) {
        *size = page_count * FFA_PAGE_SIZE;
    }

    return NO_ERROR;
}
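
/*
 * Editorial note on locking, inferred from the code below and from
 * arm_ffa_rx_release(): arm_ffa_mem_retrieve_start() acquires
 * ffa_rxtx_buffer_lock and returns with it held (frag_info points into the
 * shared rx buffer), so the caller is expected to finish processing the
 * fragment and then drop the lock via arm_ffa_rx_release().
 */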

status_t arm_ffa_mem_retrieve_start(uint16_t sender_id,
                                    uint64_t handle,
                                    uint64_t tag,
                                    uint32_t* address_range_count,
                                    uint* arch_mmu_flags,
                                    struct arm_ffa_mem_frag_info* frag_info) {
    status_t res;
    struct ffa_mtd_v1_0* mtd_v1_0;
    struct ffa_mtd_v1_1* mtd_v1_1;
    struct ffa_mtd_common* mtd;
    struct ffa_emad* emad;
    struct ffa_comp_mrd* comp_mrd;
    uint32_t computed_len;
    uint32_t header_size;

    uint32_t total_len;
    uint32_t fragment_len;

    DEBUG_ASSERT(frag_info);

    mutex_acquire(&ffa_rxtx_buffer_lock);
    arm_ffa_populate_receive_req_tx_buffer(sender_id, handle, tag);
    res = arm_ffa_mem_retrieve(sender_id, handle, &total_len, &fragment_len);

    if (res != NO_ERROR) {
        TRACEF("FF-A memory retrieve failed err=%d\n", res);
        return res;
    }

    mtd = ffa_rx;
    if (ffa_version < FFA_VERSION(1, 1)) {
        if (fragment_len < sizeof(struct ffa_mtd_v1_0)) {
            TRACEF("Fragment too short for memory transaction descriptor\n");
            return ERR_IO;
        }

        mtd_v1_0 = ffa_rx;
        if (fragment_len <
            offsetof(struct ffa_mtd_v1_0, emad) + sizeof(struct ffa_emad)) {
            TRACEF("Fragment too short for endpoint memory access descriptor\n");
            return ERR_IO;
        }
        emad = mtd_v1_0->emad;

        /*
         * We don't retrieve the memory on behalf of anyone else, so we only
         * expect one receiver address range descriptor.
         */
        if (mtd_v1_0->emad_count != 1) {
            TRACEF("unexpected response count %d != 1\n", mtd_v1_0->emad_count);
            return ERR_IO;
        }
    } else {
        if (fragment_len < sizeof(struct ffa_mtd_v1_1)) {
            TRACEF("Fragment too short for memory transaction descriptor\n");
            return ERR_IO;
        }

        mtd_v1_1 = ffa_rx;
        /*
         * We know from the check above that
         *     fragment_len >= sizeof(struct ffa_mtd_v1_1) >= sizeof(struct ffa_emad)
         * so we can rewrite the following
         *     fragment_len < emad_offset + sizeof(struct ffa_emad)
         * into
         *     fragment_len - sizeof(struct ffa_emad) < emad_offset
         * to avoid a potential overflow.
         */
        if (fragment_len - sizeof(struct ffa_emad) < mtd_v1_1->emad_offset) {
            TRACEF("Fragment too short for endpoint memory access descriptor\n");
            return ERR_IO;
        }
        if (mtd_v1_1->emad_offset < sizeof(struct ffa_mtd_v1_1)) {
            TRACEF("Endpoint memory access descriptor offset too short\n");
            return ERR_IO;
        }
        if (!IS_ALIGNED(mtd_v1_1->emad_offset, 16)) {
            TRACEF("Endpoint memory access descriptor not aligned to 16 bytes\n");
            return ERR_IO;
        }
        emad = (struct ffa_emad*)((uint8_t*)mtd_v1_1 + mtd_v1_1->emad_offset);

        if (mtd_v1_1->emad_count != 1) {
            TRACEF("unexpected response count %d != 1\n", mtd_v1_1->emad_count);
            return ERR_IO;
        }
    }

    LTRACEF("comp_mrd_offset: %u\n", emad->comp_mrd_offset);
    if (emad->comp_mrd_offset + sizeof(*comp_mrd) > fragment_len) {
        TRACEF("Fragment length %u too short for comp_mrd_offset %u\n",
               fragment_len, emad->comp_mrd_offset);
        return ERR_IO;
    }

    comp_mrd = ffa_rx + emad->comp_mrd_offset;

    uint32_t address_range_count_out = comp_mrd->address_range_count;
    frag_info->address_ranges = comp_mrd->address_range_array;
    LTRACEF("address_range_count: %u\n", address_range_count_out);

    computed_len = emad->comp_mrd_offset +
                   offsetof(struct ffa_comp_mrd, address_range_array) +
                   sizeof(struct ffa_cons_mrd) * comp_mrd->address_range_count;
    if (total_len != computed_len) {
        TRACEF("Reported length %u != computed length %u\n", total_len,
               computed_len);
        return ERR_IO;
    }

    header_size = emad->comp_mrd_offset +
                  offsetof(struct ffa_comp_mrd, address_range_array);
    frag_info->count =
            (fragment_len - header_size) / sizeof(struct ffa_cons_mrd);
    LTRACEF("Descriptors in fragment %u\n", frag_info->count);

    if (frag_info->count * sizeof(struct ffa_cons_mrd) + header_size !=
        fragment_len) {
        TRACEF("fragment length %u, contains partial descriptor\n",
               fragment_len);
        return ERR_IO;
    }

    frag_info->received_len = fragment_len;
    frag_info->start_index = 0;

    uint arch_mmu_flags_out = 0;

    switch (mtd->flags & FFA_MTD_FLAG_TYPE_MASK) {
    case FFA_MTD_FLAG_TYPE_SHARE_MEMORY:
        /*
         * If memory is shared, assume it is not safe to execute out of. This
         * specifically indicates that another party may have access to the
         * memory.
         */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
        break;
    case FFA_MTD_FLAG_TYPE_LEND_MEMORY:
        break;
    case FFA_MTD_FLAG_TYPE_DONATE_MEMORY:
        TRACEF("The donate memory transaction type is not supported\n");
        return ERR_NOT_IMPLEMENTED;
    default:
        TRACEF("Unknown memory transaction type: 0x%x\n", mtd->flags);
        return ERR_NOT_VALID;
    }

    switch (mtd->memory_region_attributes & ~FFA_MEM_ATTR_NONSECURE) {
    case FFA_MEM_ATTR_DEVICE_NGNRE:
        arch_mmu_flags_out |= ARCH_MMU_FLAG_UNCACHED_DEVICE;
        break;
    case FFA_MEM_ATTR_NORMAL_MEMORY_UNCACHED:
        arch_mmu_flags_out |= ARCH_MMU_FLAG_UNCACHED;
        break;
    case (FFA_MEM_ATTR_NORMAL_MEMORY_CACHED_WB | FFA_MEM_ATTR_INNER_SHAREABLE):
        arch_mmu_flags_out |= ARCH_MMU_FLAG_CACHED;
        break;
    default:
        TRACEF("Invalid memory attributes, 0x%x\n",
               mtd->memory_region_attributes);
        return ERR_NOT_VALID;
    }

    if (!(emad->mapd.memory_access_permissions & FFA_MEM_PERM_RW)) {
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_RO;
    }
    if (emad->mapd.memory_access_permissions & FFA_MEM_PERM_NX) {
        /*
         * Don't allow executable mappings if the stage 2 page tables don't
         * allow it. The hardware allows the stage 2 NX bit to only apply to
         * EL1, not EL0, but neither FF-A nor LK can currently express this, so
         * disallow both if FFA_MEM_PERM_NX is set.
         */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    }

    if (!supports_ns_bit ||
        (mtd->memory_region_attributes & FFA_MEM_ATTR_NONSECURE)) {
        arch_mmu_flags_out |= ARCH_MMU_FLAG_NS;
        /* Regardless of origin, we don't want to execute out of NS memory. */
        arch_mmu_flags_out |= ARCH_MMU_FLAG_PERM_NO_EXECUTE;
    }

    if (arch_mmu_flags) {
        *arch_mmu_flags = arch_mmu_flags_out;
    }
    if (address_range_count) {
        *address_range_count = address_range_count_out;
    }

    return res;
}

/*
 * This assumes that the fragment is completely composed of memory
 * region descriptors (struct ffa_cons_mrd).
 */
status_t arm_ffa_mem_retrieve_next_frag(
        uint64_t handle,
        struct arm_ffa_mem_frag_info* frag_info) {
    status_t res;
    uint32_t fragment_len;

    mutex_acquire(&ffa_rxtx_buffer_lock);

    res = arm_ffa_call_mem_frag_rx(handle, frag_info->received_len,
                                   &fragment_len);

    if (res != NO_ERROR) {
        TRACEF("Failed to get memory retrieve fragment, err = %d\n", res);
        return res;
    }

    frag_info->received_len += fragment_len;
    frag_info->start_index += frag_info->count;

    frag_info->count = fragment_len / sizeof(struct ffa_cons_mrd);
    if (frag_info->count * sizeof(struct ffa_cons_mrd) != fragment_len) {
        TRACEF("fragment length %u, contains partial descriptor\n",
               fragment_len);
        return ERR_IO;
    }

    frag_info->address_ranges = ffa_rx;

    return NO_ERROR;
}
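
/*
 * Caller-side sketch (hypothetical, following the locking contract noted
 * above arm_ffa_mem_retrieve_start): walking all address ranges of a
 * retrieved memory region, fetching further fragments on demand:
 *
 *     struct arm_ffa_mem_frag_info frag_info;
 *     uint32_t range_count;
 *     uint arch_mmu_flags;
 *     status_t res = arm_ffa_mem_retrieve_start(sender_id, handle, tag,
 *                                               &range_count, &arch_mmu_flags,
 *                                               &frag_info);
 *     for (size_t i = 0; res == NO_ERROR && i < range_count; i++) {
 *         paddr_t addr;
 *         size_t size;
 *         if (i >= frag_info.start_index + frag_info.count) {
 *             arm_ffa_rx_release();  // drop the lock before the next FRAG_RX
 *             res = arm_ffa_mem_retrieve_next_frag(handle, &frag_info);
 *             if (res != NO_ERROR)
 *                 break;
 *         }
 *         res = arm_ffa_mem_address_range_get(&frag_info, i, &addr, &size);
 *         // map or record [addr, addr + size) ...
 *     }
 *     arm_ffa_rx_release();
 *
 * sender_id, handle and tag come from the sharing endpoint; error unwinding
 * is elided for brevity.
 */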

status_t arm_ffa_mem_share_kernel_buffer(uint16_t receiver_id,
                                         paddr_t buffer,
                                         size_t num_ffa_pages,
                                         uint arch_mmu_flags,
                                         uint64_t* handle) {
    status_t res;
    uint32_t len_out, fragment_len_out;

    DEBUG_ASSERT(handle);

    if (buffer % FFA_PAGE_SIZE) {
        LTRACEF("Buffer address must be page-aligned\n");
        return ERR_INVALID_ARGS;
    }
    if (!(arch_mmu_flags & ARCH_MMU_FLAG_PERM_NO_EXECUTE)) {
        LTRACEF("Only non-executable buffers may be shared over FFA\n");
        return ERR_INVALID_ARGS;
    }

    mutex_acquire(&ffa_rxtx_buffer_lock);

    /* Populate the tx buffer with 1 composite mrd and 1 constituent mrd */
    arm_ffa_populate_share_tx_buffer(receiver_id, buffer, num_ffa_pages,
                                     arch_mmu_flags, 0);
    res = arm_ffa_call_mem_share(1, 1, &len_out, &fragment_len_out, handle);
    LTRACEF("total_len: %u, fragment_len: %u, handle: %" PRIx64 "\n", len_out,
            fragment_len_out, *handle);
    if (res != NO_ERROR) {
        TRACEF("FF-A memory share failed, err = %d\n", res);
    }

    mutex_release(&ffa_rxtx_buffer_lock);
    return res;
}
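
/*
 * Usage sketch (hypothetical values): sharing one non-executable kernel page
 * with a peer endpoint and later reclaiming it:
 *
 *     uint64_t handle;
 *     status_t res = arm_ffa_mem_share_kernel_buffer(
 *             peer_id, page_paddr, PAGE_SIZE / FFA_PAGE_SIZE,
 *             ARCH_MMU_FLAG_CACHED | ARCH_MMU_FLAG_PERM_NO_EXECUTE, &handle);
 *     if (res == NO_ERROR) {
 *         // hand `handle` to the peer, e.g. over a direct message ...
 *         res = arm_ffa_mem_reclaim(handle);
 *     }
 *
 * peer_id and page_paddr are placeholders; num_ffa_pages is counted in
 * FFA_PAGE_SIZE units, hence the PAGE_SIZE / FFA_PAGE_SIZE conversion.
 */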

status_t arm_ffa_mem_reclaim(uint64_t handle) {
    struct smc_ret8 smc_ret;
    uint32_t handle_lo = (uint32_t)handle;
    uint32_t handle_hi = (uint32_t)(handle >> 32);
    uint32_t flags = 0;

    smc_ret = smc8(SMC_FC_FFA_MEM_RECLAIM, handle_lo, handle_hi, flags, 0, 0, 0,
                   0);

    switch ((uint32_t)smc_ret.r0) {
    case SMC_FC_FFA_SUCCESS:
        return NO_ERROR;
    case SMC_FC_FFA_ERROR:
        switch ((int32_t)smc_ret.r2) {
        case FFA_ERROR_INVALID_PARAMETERS:
            return ERR_INVALID_ARGS;
        case FFA_ERROR_NO_MEMORY:
            return ERR_NO_MEMORY;
        case FFA_ERROR_DENIED:
            return ERR_BAD_STATE;
        case FFA_ERROR_ABORTED:
            return ERR_CANCELLED;
        default:
            TRACEF("Unexpected FFA_ERROR: %lx\n", smc_ret.r2);
            return ERR_NOT_VALID;
        }
    default:
        TRACEF("Unexpected FFA SMC: %lx\n", smc_ret.r0);
        return ERR_NOT_VALID;
    }
}

status_t arm_ffa_rx_release(void) {
    status_t res;
    ASSERT(is_mutex_held(&ffa_rxtx_buffer_lock));

    if (!supports_rx_release) {
        res = NO_ERROR;
    } else {
        res = arm_ffa_call_rx_release();
    }

    mutex_release(&ffa_rxtx_buffer_lock);

    if (res == ERR_NOT_SUPPORTED) {
        TRACEF("Tried to release rx buffer when the operation is not supported!\n");
    } else if (res != NO_ERROR) {
        TRACEF("Failed to release rx buffer, err = %d\n", res);
        return res;
    }
    return NO_ERROR;
}

status_t arm_ffa_mem_relinquish(uint64_t handle) {
    status_t res;

    /* As flags are set to 0, no request to zero the memory is made. */
    res = arm_ffa_call_mem_relinquish(handle, 0, 1, &ffa_local_id);
    if (res != NO_ERROR) {
        TRACEF("Failed to relinquish memory region, err = %d\n", res);
    }

    return res;
}

static status_t arm_ffa_setup(void) {
    status_t res;
    uint16_t ver_major_ret;
    uint16_t ver_minor_ret;
    bool is_implemented;
    size_t buf_size_log2;
    size_t ref_count_num_bits;
    size_t arch_page_count;
    size_t ffa_page_count;
    size_t count;
    paddr_t tx_paddr;
    paddr_t rx_paddr;
    void* tx_vaddr;
    void* rx_vaddr;
    struct list_node page_list = LIST_INITIAL_VALUE(page_list);

    res = arm_ffa_call_version(FFA_CURRENT_VERSION_MAJOR,
                               FFA_CURRENT_VERSION_MINOR, &ver_major_ret,
                               &ver_minor_ret);
    if (res != NO_ERROR) {
        TRACEF("No compatible FF-A version found\n");
        return res;
    } else if (FFA_CURRENT_VERSION_MAJOR != ver_major_ret) {
        /* A downgrade is only allowed within the same major version. */
        TRACEF("Incompatible FF-A interface version, %" PRIu16 ".%" PRIu16 "\n",
               ver_major_ret, ver_minor_ret);
        return ERR_NOT_SUPPORTED;
    }

    ffa_version = FFA_VERSION(ver_major_ret, ver_minor_ret);
    if (ffa_version > FFA_CURRENT_VERSION) {
        /* The SPMC supports a newer version; downgrade to ours. */
        ffa_version = FFA_CURRENT_VERSION;
    }
    LTRACEF("Negotiated FF-A version %" PRIu16 ".%" PRIu16 "\n",
            FFA_VERSION_TO_MAJOR(ffa_version),
            FFA_VERSION_TO_MINOR(ffa_version));

    res = arm_ffa_call_id_get(&ffa_local_id);
    if (res != NO_ERROR) {
        TRACEF("Failed to get FF-A partition id (err=%d)\n", res);
        return res;
    }

    res = arm_ffa_rx_release_is_implemented(&is_implemented);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_RX_RELEASE is implemented (err=%d)\n",
               res);
        return res;
    }
    if (is_implemented) {
        supports_rx_release = true;
    } else {
        LTRACEF("FFA_RX_RELEASE is not implemented\n");
    }

    res = arm_ffa_rxtx_map_is_implemented(&is_implemented, &buf_size_log2);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_RXTX_MAP is implemented (err=%d)\n", res);
        return res;
    }
    if (!is_implemented) {
        TRACEF("FFA_RXTX_MAP is not implemented\n");
        return ERR_NOT_SUPPORTED;
    }

    res = arm_ffa_mem_retrieve_req_is_implemented(
            &is_implemented, NULL, &supports_ns_bit, &ref_count_num_bits);
    if (res != NO_ERROR) {
        TRACEF("Error checking if FFA_MEM_RETRIEVE_REQ is implemented (err=%d)\n",
               res);
        return res;
    }
    if (!is_implemented) {
        TRACEF("FFA_MEM_RETRIEVE_REQ is not implemented\n");
    } else if (ref_count_num_bits < 64) {
        /*
         * Expect a 64-bit reference count. If we don't have it, future calls
         * to SMC_FC_FFA_MEM_RETRIEVE_REQ can fail if we receive the same
         * handle multiple times. Warn about this, but don't return an error
         * as we only receive each handle once in the typical case.
         */
        TRACEF("Warning FFA_MEM_RETRIEVE_REQ does not have 64 bit reference count (%zu)\n",
               ref_count_num_bits);
    }

    ffa_buf_size = 1U << buf_size_log2;
    ASSERT((ffa_buf_size % FFA_PAGE_SIZE) == 0);

    arch_page_count = DIV_ROUND_UP(ffa_buf_size, PAGE_SIZE);
    ffa_page_count = ffa_buf_size / FFA_PAGE_SIZE;
    count = pmm_alloc_contiguous(arch_page_count, buf_size_log2, &tx_paddr,
                                 &page_list);
    if (count != arch_page_count) {
        TRACEF("Failed to allocate tx buffer %zx!=%zx\n", count,
               arch_page_count);
        res = ERR_NO_MEMORY;
        goto err_alloc_tx;
    }
    tx_vaddr = paddr_to_kvaddr(tx_paddr);
    ASSERT(tx_vaddr);

    count = pmm_alloc_contiguous(arch_page_count, buf_size_log2, &rx_paddr,
                                 &page_list);
    if (count != arch_page_count) {
        TRACEF("Failed to allocate rx buffer %zx!=%zx\n", count,
               arch_page_count);
        res = ERR_NO_MEMORY;
        goto err_alloc_rx;
    }
    rx_vaddr = paddr_to_kvaddr(rx_paddr);
    ASSERT(rx_vaddr);

    res = arm_ffa_call_rxtx_map(tx_paddr, rx_paddr, ffa_page_count);
    if (res != NO_ERROR) {
        TRACEF("Failed to map tx @ %p, rx @ %p, page count 0x%zx (err=%d)\n",
               (void*)tx_paddr, (void*)rx_paddr, ffa_page_count, res);
        goto err_rxtx_map;
    }

    ffa_tx = tx_vaddr;
    ffa_rx = rx_vaddr;

    return res;

err_rxtx_map:
err_alloc_rx:
    pmm_free(&page_list);
err_alloc_tx:
    /* pmm_alloc_contiguous leaves the page list unchanged on failure */

    return res;
}

static void arm_ffa_init(uint level) {
    status_t res;

    res = arm_ffa_setup();

    if (res == NO_ERROR) {
        ffa_init_state = ARM_FFA_INIT_SUCCESS;

#if WITH_SMP
        res = ffa_call_secondary_ep_register();
        if (res == ERR_NOT_SUPPORTED) {
            LTRACEF("FFA_SECONDARY_EP_REGISTER is not supported\n");
        } else if (res != NO_ERROR) {
            TRACEF("Failed to register secondary core entry point (err=%d)\n",
                   res);
        }
#endif
    } else {
        TRACEF("Failed to initialize FF-A (err=%d)\n", res);
        ffa_init_state = ARM_FFA_INIT_FAILED;
    }
}

LK_INIT_HOOK(arm_ffa_init, arm_ffa_init, LK_INIT_LEVEL_PLATFORM - 2);