// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void	*buf;
	size_t	len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers ffa_buffers;

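/*
 * Helpers to convert an FF-A status code into an SMCCC result: errors are
 * reported as FFA_ERROR with the FF-A error code in a2, successes as
 * FFA_SUCCESS with an optional property value in a2.
 */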
static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0 = FFA_ERROR,
		.a2 = ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

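/*
 * An FF-A call is a fast SMCCC call from the standard service range with a
 * function number between FFA_MIN_FUNC_NUM and FFA_MAX_FUNC_NUM. The SMC32
 * and SMC64 conventions both match, as the convention bit isn't checked.
 */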
static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

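/*
 * Thin wrappers around the FF-A interface exposed by the SPMD at EL3. Note
 * that, on the map path, it is the hypervisor's own RX/TX buffer pair which
 * is registered with EL3, not the host's: the proxy always sits in the
 * middle of the conversation.
 */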
static int spmd_map_ffa_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(ffa_buffers.tx),
			  hyp_virt_to_phys(ffa_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int spmd_unmap_ffa_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static void spmd_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void spmd_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void spmd_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			  u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void spmd_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			     u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}

static void spmd_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}

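/*
 * FFA_FN64_RXTX_MAP handler.
 *
 * Map the hypervisor's buffer pair with EL3 on behalf of the host, then
 * share and pin the host's own buffers into the hypervisor so they can be
 * used as a staging area: descriptors are always copied out of the host
 * buffers before being validated and forwarded.
 */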
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_kvm.ffa.lock);
	if (host_kvm.ffa.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	ret = spmd_map_ffa_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_kvm.ffa.tx = tx_virt;
	host_kvm.ffa.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_kvm.ffa.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	spmd_unmap_ffa_buffers();
	goto out_unlock;
}

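/*
 * FFA_RXTX_UNMAP handler: tear down in the reverse order of
 * do_ffa_rxtx_map(), unpinning and unsharing the host buffers before
 * unmapping the hypervisor's pair at EL3.
 */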
static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_kvm.ffa.lock);
	if (!host_kvm.ffa.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_kvm.ffa.tx, host_kvm.ffa.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_kvm.ffa.tx)));
	host_kvm.ffa.tx = NULL;

	hyp_unpin_shared_mem(host_kvm.ffa.rx, host_kvm.ffa.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_kvm.ffa.rx)));
	host_kvm.ffa.rx = NULL;

	spmd_unmap_ffa_buffers();

out_unlock:
	hyp_spin_unlock(&host_kvm.ffa.lock);
out:
	ffa_to_smccc_res(res, ret);
}

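/*
 * Walk an array of constituent memory ranges and transition each one in the
 * host stage-2. FFA_PAGE_SIZE may be smaller than the host page size, so
 * reject any range whose total size isn't host-page-aligned. Return the
 * number of ranges processed successfully so that the caller can unwind a
 * partial failure.
 */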
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

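/*
 * All-or-nothing wrappers around the helpers above: if any range in the
 * array fails to transition, roll back the ranges that did succeed and
 * fail the whole operation with FFA_RET_DENIED.
 */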
static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

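/*
 * FFA_MEM_FRAG_TX handler.
 *
 * Handle a continuation fragment of an FFA_MEM_SHARE/FFA_MEM_LEND
 * transaction. Fragments after the first carry no descriptor header, just
 * an array of address ranges, so the fragment length must be a multiple of
 * the range size. The ranges are staged through the hypervisor TX buffer,
 * shared in the host stage-2 and then forwarded on to EL3.
 */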
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_kvm.ffa.lock);
	if (!host_kvm.ffa.tx)
		goto out_unlock;

	buf = ffa_buffers.tx;
	memcpy(buf, host_kvm.ffa.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		spmd_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	spmd_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_kvm.ffa.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we
	 * have now lost the content of the previous fragments and we can't
	 * roll back the host stage-2 changes. The pages previously marked
	 * as shared will remain stuck in that state forever, preventing
	 * the host from sharing/donating them again. This may lead to
	 * subsequent failures, but it does not compromise confidentiality.
	 */
	return;
}

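/*
 * FFA_MEM_SHARE/FFA_MEM_LEND handler, covering the first (and possibly
 * only) fragment of the transaction.
 *
 * Copy the descriptor out of the host TX buffer, sanity-check it (exactly
 * one endpoint entry, the host as sender, the composite region contained
 * within the fragment), share the constituent ranges in the host stage-2
 * and then forward the call to EL3. For a multi-fragment transaction, EL3
 * must answer with FFA_MEM_FRAG_RX expecting the next fragment at offset
 * 'fraglen'; any other response causes the stage-2 changes to be unwound.
 */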
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_kvm.ffa.lock);
	if (!host_kvm.ffa.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	buf = ffa_buffers.tx;
	memcpy(buf, host_kvm.ffa.tx, fraglen);

	offset = buf->ep_mem_access[0].composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	spmd_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_kvm.ffa.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}

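/*
 * FFA_MEM_RECLAIM handler.
 *
 * The host doesn't tell us which ranges a handle covers, so retrieve the
 * full descriptor back from EL3 first: issue an FFA_MEM_RETRIEVE_REQ for
 * the handle and reassemble the (potentially fragmented) response into
 * ffa_desc_buf. Only once the reclaim has succeeded at EL3 are the
 * constituent ranges unshared in the host stage-2.
 */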
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_kvm.ffa.lock);

	buf = ffa_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	spmd_retrieve_req(res, sizeof(*buf));
	buf = ffa_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	offset = buf->ep_mem_access[0].composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, ffa_buffers.rx, fraglen);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		spmd_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, ffa_buffers.rx, fraglen);
	}

	spmd_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_kvm.ffa.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}

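/*
 * FF-A calls which are rejected as "unsupported" rather than forwarded:
 * memory-management calls the proxy cannot mediate safely, indirect
 * message passing via the RX/TX buffers (EL3 only knows about the
 * hypervisor's buffer pair, not the host's) and the 32-bit variants of
 * calls for which only the 64-bit forms are handled here.
 */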
static bool ffa_call_unsupported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return true;
	}

	return false;
}

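/*
 * FFA_FEATURES handler. Report "not supported" for the calls blocked by
 * the proxy, advertise SHARE/LEND with no support for dynamic buffers,
 * and leave anything else to be forwarded on to EL3 by the caller.
 */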
static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (ffa_call_unsupported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}

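/*
 * Entry point from the host SMC handler. Returns true if the call was an
 * FF-A call handled (or rejected) here, with the return values already
 * written back to the host context, and false if it should be passed
 * through to EL3 unmodified. Note that the 32-bit MEM_SHARE/MEM_LEND
 * variants are upgraded to their 64-bit counterparts before forwarding.
 */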
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	struct arm_smccc_res res;

	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	}

	if (!ffa_call_unsupported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}

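/*
 * Probe for FF-A v1.0 at EL3 and, if present, set up the proxy: check that
 * the non-secure physical instance has ID 0 as the spec requires and that
 * the advertised minimum RXTX buffer size fits in a host page, then carve
 * the pages donated by the host into the hypervisor's TX/RX mailboxes and
 * the descriptor buffer used on the reclaim path.
 */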
int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_1)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	if (res.a0 != FFA_VERSION_1_0)
		return -EOPNOTSUPP;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	ffa_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_kvm.ffa = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}