// SPDX-License-Identifier: GPL-2.0-only
/*
 * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
 * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
 * Framework for Arm A-profile", which is specified by Arm in document
 * number DEN0077.
 *
 * Copyright (C) 2022 - Google LLC
 * Author: Andrew Walbran <qwandor@google.com>
 *
 * This driver hooks into the SMC trapping logic for the host and intercepts
 * all calls falling within the FF-A range. Each call is either:
 *
 *	- Forwarded on unmodified to the SPMD at EL3
 *	- Rejected as "unsupported"
 *	- Accompanied by a host stage-2 page-table check/update and reissued
 *
 * Consequently, any attempts by the host to make guest memory pages
 * accessible to the secure world using FF-A will be detected either here
 * (in the case that the memory is already owned by the guest) or during
 * donation to the guest (in the case that the memory was previously shared
 * with the secure world).
 *
 * To allow the rolling-back of page-table updates and FF-A calls in the
 * event of failure, operations involving the RXTX buffers are locked for
 * the duration and are therefore serialised.
 */

#include <linux/arm-smccc.h>
#include <linux/arm_ffa.h>
#include <asm/kvm_pkvm.h>

#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
#include <nvhe/spinlock.h>

/*
 * "ID value 0 must be returned at the Non-secure physical FF-A instance"
 * We share this ID with the host.
 */
#define HOST_FFA_ID	0

/*
 * A buffer to hold the maximum descriptor size we can see from the host,
 * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
 * when resolving the handle on the reclaim path.
 */
struct kvm_ffa_descriptor_buffer {
	void *buf;
	size_t len;
};

static struct kvm_ffa_descriptor_buffer ffa_desc_buf;

struct kvm_ffa_buffers {
	hyp_spinlock_t lock;
	void *tx;
	void *rx;
};

/*
 * Note that we don't currently lock these buffers explicitly, instead
 * relying on the locking of the host FFA buffers as we only have one
 * client.
 */
static struct kvm_ffa_buffers hyp_buffers;
static struct kvm_ffa_buffers host_buffers;

static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
{
	*res = (struct arm_smccc_res) {
		.a0	= FFA_ERROR,
		.a2	= ffa_errno,
	};
}

static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
{
	if (ret == FFA_RET_SUCCESS) {
		*res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
						.a2 = prop };
	} else {
		ffa_to_smccc_error(res, ret);
	}
}

static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
{
	ffa_to_smccc_res_prop(res, ret, 0);
}

static void ffa_set_retval(struct kvm_cpu_context *ctxt,
			   struct arm_smccc_res *res)
{
	cpu_reg(ctxt, 0) = res->a0;
	cpu_reg(ctxt, 1) = res->a1;
	cpu_reg(ctxt, 2) = res->a2;
	cpu_reg(ctxt, 3) = res->a3;
}

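/*
 * FF-A function identifiers are SMCCC fast calls owned by the standard
 * service, with a contiguous range of function numbers
 * ([FFA_MIN_FUNC_NUM, FFA_MAX_FUNC_NUM]) carved out for FF-A. Both the
 * SMC32 and SMC64 calling conventions land in this range, so a single
 * check covers e.g. FFA_MEM_SHARE and FFA_FN64_MEM_SHARE alike.
 */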
static bool is_ffa_call(u64 func_id)
{
	return ARM_SMCCC_IS_FAST_CALL(func_id) &&
	       ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
	       ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
	       ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
}

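/*
 * Register the hypervisor's own RX/TX pair with the SPMD. For
 * FFA_RXTX_MAP, x1 carries the TX buffer PA, x2 the RX buffer PA and
 * x3 the buffer size in 4KiB FF-A pages; once mapped, the SPMD uses
 * this pair (rather than the host's) for all descriptors we relay.
 */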
static int ffa_map_hyp_buffers(u64 ffa_page_count)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
			  hyp_virt_to_phys(hyp_buffers.tx),
			  hyp_virt_to_phys(hyp_buffers.rx),
			  ffa_page_count,
			  0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

static int ffa_unmap_hyp_buffers(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
			  HOST_FFA_ID,
			  0, 0, 0, 0, 0, 0,
			  &res);

	return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
}

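/*
 * Thin wrappers around the memory-management SMCs. These issue the
 * real calls to EL3 on the hypervisor's behalf; all validation and
 * state tracking happens in the do_ffa_*() handlers below.
 */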
static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fraglen, u32 endpoint_id)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
			  handle_lo, handle_hi, fraglen, endpoint_id,
			  0, 0, 0,
			  res);
}

static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 fragoff)
{
	arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
			  handle_lo, handle_hi, fragoff, HOST_FFA_ID,
			  0, 0, 0,
			  res);
}

static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
			 u32 fraglen)
{
	arm_smccc_1_1_smc(func_id, len, fraglen,
			  0, 0, 0, 0, 0,
			  res);
}

static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
			    u32 handle_hi, u32 flags)
{
	arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
			  handle_lo, handle_hi, flags,
			  0, 0, 0, 0,
			  res);
}

static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
{
	arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
			  len, len,
			  0, 0, 0, 0, 0,
			  res);
}

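/*
 * Handle the host's FFA_RXTX_MAP call. Rather than mapping the host's
 * buffers at the SPMD, we register our own hypervisor-private pair and
 * then share + pin the host's pages into the hyp address space so that
 * descriptors can be copied and sanitised before being relayed. Each
 * failure path unwinds exactly the steps that preceded it.
 */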
static void do_ffa_rxtx_map(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(phys_addr_t, tx, ctxt, 1);
	DECLARE_REG(phys_addr_t, rx, ctxt, 2);
	DECLARE_REG(u32, npages, ctxt, 3);
	int ret = 0;
	void *rx_virt, *tx_virt;

	if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (host_buffers.tx) {
		ret = FFA_RET_DENIED;
		goto out_unlock;
	}

	/*
	 * Map our hypervisor buffers into the SPMD before mapping and
	 * pinning the host buffers in our own address space.
	 */
	ret = ffa_map_hyp_buffers(npages);
	if (ret)
		goto out_unlock;

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unmap;
	}

	ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_tx;
	}

	tx_virt = hyp_phys_to_virt(tx);
	ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unshare_rx;
	}

	rx_virt = hyp_phys_to_virt(rx);
	ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
	if (ret) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto err_unpin_tx;
	}

	host_buffers.tx = tx_virt;
	host_buffers.rx = rx_virt;

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
	return;

err_unpin_tx:
	hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
err_unshare_rx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
err_unshare_tx:
	__pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
err_unmap:
	ffa_unmap_hyp_buffers();
	goto out_unlock;
}

static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
			      struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	int ret = 0;

	if (id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
	host_buffers.tx = NULL;

	hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
	WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
	host_buffers.rx = NULL;

	ffa_unmap_hyp_buffers();

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	ffa_to_smccc_res(res, ret);
}

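/*
 * Walk a constituent list and transition each range to the FFA-shared
 * state in the host stage-2 page-table. Range sizes are expressed in
 * 4KiB FF-A pages (FFA_PAGE_SIZE), so e.g. with a 64KiB kernel
 * PAGE_SIZE a range is only acceptable if pg_cnt is a multiple of 16.
 * Returns the number of ranges successfully shared; the caller compares
 * this against nranges to detect and unwind a partial failure.
 */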
static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				     u32 nranges)
{
	u32 i;

	for (i = 0; i < nranges; ++i) {
		struct ffa_mem_region_addr_range *range = &ranges[i];
		u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
		u64 pfn = hyp_phys_to_pfn(range->address);

		if (!PAGE_ALIGNED(sz))
			break;

		if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
			break;
	}

	return i;
}

static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
				 u32 nranges)
{
	u32 nshared = __ffa_host_share_ranges(ranges, nranges);
	int ret = 0;

	if (nshared != nranges) {
		WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
				   u32 nranges)
{
	u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
	int ret = 0;

	if (nunshared != nranges) {
		WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
		ret = FFA_RET_DENIED;
	}

	return ret;
}

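/*
 * Handle a host FFA_MEM_FRAG_TX: the continuation of an FFA_MEM_SHARE
 * or FFA_MEM_LEND whose descriptor did not fit in a single mailbox.
 * Each fragment carries the remainder of the composite descriptor,
 * i.e. an array of ffa_mem_region_addr_range entries, so its ranges
 * can be validated and shared before the fragment is forwarded on.
 */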
static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, fraglen, ctxt, 3);
	DECLARE_REG(u32, endpoint_id, ctxt, 4);
	struct ffa_mem_region_addr_range *buf;
	int ret = FFA_RET_INVALID_PARAMETERS;
	u32 nr_ranges;

	if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
		goto out;

	if (fraglen % sizeof(*buf))
		goto out;

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx)
		goto out_unlock;

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);
	nr_ranges = fraglen / sizeof(*buf);

	ret = ffa_host_share_ranges(buf, nr_ranges);
	if (ret) {
		/*
		 * We're effectively aborting the transaction, so we need
		 * to restore the global state back to what it was prior to
		 * transmission of the first fragment.
		 */
		ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
		WARN_ON(res->a0 != FFA_SUCCESS);
		goto out_unlock;
	}

	ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
	if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
		WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);

	/*
	 * If for any reason this did not succeed, we're in trouble as we have
	 * now lost the content of the previous fragments and we can't rollback
	 * the host stage-2 changes. The pages previously marked as shared will
	 * remain stuck in that state forever, hence preventing the host from
	 * sharing/donating them again and may possibly lead to subsequent
	 * failures, but this will not compromise confidentiality.
	 */
	return;
}

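/*
 * Handle FFA_MEM_SHARE/FFA_MEM_LEND from the host. The first fragment
 * of the transaction descriptor is copied out of the host's TX buffer
 * and walked: the (single) endpoint access descriptor gives the offset
 * of the ffa_composite_mem_region, whose trailing constituents[] array
 * names the physical ranges being handed to the secure world. Those
 * ranges are transitioned in the host stage-2 before the call is
 * reissued using the hypervisor's own TX buffer; any later fragments
 * arrive via FFA_MEM_FRAG_TX and are handled above.
 */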
static __always_inline void do_ffa_mem_xfer(const u64 func_id,
					    struct arm_smccc_res *res,
					    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, len, ctxt, 1);
	DECLARE_REG(u32, fraglen, ctxt, 2);
	DECLARE_REG(u64, addr_mbz, ctxt, 3);
	DECLARE_REG(u32, npages_mbz, ctxt, 4);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	struct ffa_mem_region *buf;
	u32 offset, nr_ranges;
	int ret = 0;

	BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
		     func_id != FFA_FN64_MEM_LEND);

	if (addr_mbz || npages_mbz || fraglen > len ||
	    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	if (fraglen < sizeof(struct ffa_mem_region) +
		      sizeof(struct ffa_mem_region_attributes)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out;
	}

	hyp_spin_lock(&host_buffers.lock);
	if (!host_buffers.tx) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	buf = hyp_buffers.tx;
	memcpy(buf, host_buffers.tx, fraglen);

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
	offset = ep_mem_access->composite_off;
	if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	reg = (void *)buf + offset;
	nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
	if (nr_ranges % sizeof(reg->constituents[0])) {
		ret = FFA_RET_INVALID_PARAMETERS;
		goto out_unlock;
	}

	nr_ranges /= sizeof(reg->constituents[0]);
	ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
	if (ret)
		goto out_unlock;

	ffa_mem_xfer(res, func_id, len, fraglen);
	if (fraglen != len) {
		if (res->a0 != FFA_MEM_FRAG_RX)
			goto err_unshare;

		if (res->a3 != fraglen)
			goto err_unshare;
	} else if (res->a0 != FFA_SUCCESS) {
		goto err_unshare;
	}

out_unlock:
	hyp_spin_unlock(&host_buffers.lock);
out:
	if (ret)
		ffa_to_smccc_res(res, ret);
	return;

err_unshare:
	WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
	goto out_unlock;
}

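/*
 * Handle FFA_MEM_RECLAIM from the host. The hypervisor doesn't track
 * which ranges belong to a given handle, so it must first retrieve the
 * full transaction descriptor from the SPMD (reassembling it into
 * ffa_desc_buf if the response is fragmented) in order to know which
 * constituents to return to the host stage-2 once the reclaim at EL3
 * has succeeded.
 */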
static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
			       struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, handle_lo, ctxt, 1);
	DECLARE_REG(u32, handle_hi, ctxt, 2);
	DECLARE_REG(u32, flags, ctxt, 3);
	struct ffa_mem_region_attributes *ep_mem_access;
	struct ffa_composite_mem_region *reg;
	u32 offset, len, fraglen, fragoff;
	struct ffa_mem_region *buf;
	int ret = 0;
	u64 handle;

	handle = PACK_HANDLE(handle_lo, handle_hi);

	hyp_spin_lock(&host_buffers.lock);

	buf = hyp_buffers.tx;
	*buf = (struct ffa_mem_region) {
		.sender_id	= HOST_FFA_ID,
		.handle		= handle,
	};

	ffa_retrieve_req(res, sizeof(*buf));
	buf = hyp_buffers.rx;
	if (res->a0 != FFA_MEM_RETRIEVE_RESP)
		goto out_unlock;

	len = res->a1;
	fraglen = res->a2;

	ep_mem_access = (void *)buf +
			ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
	offset = ep_mem_access->composite_off;
	/*
	 * We can trust the SPMD to get this right, but let's at least
	 * check that we end up with something that doesn't look _completely_
	 * bogus.
	 */
	if (WARN_ON(offset > len ||
		    fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
		ret = FFA_RET_ABORTED;
		goto out_unlock;
	}

	if (len > ffa_desc_buf.len) {
		ret = FFA_RET_NO_MEMORY;
		goto out_unlock;
	}

	buf = ffa_desc_buf.buf;
	memcpy(buf, hyp_buffers.rx, fraglen);

	for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
		ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
		if (res->a0 != FFA_MEM_FRAG_TX) {
			ret = FFA_RET_INVALID_PARAMETERS;
			goto out_unlock;
		}

		fraglen = res->a3;
		memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
	}

	ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
	if (res->a0 != FFA_SUCCESS)
		goto out_unlock;

	reg = (void *)buf + offset;
	/* If the SPMD was happy, then we should be too. */
	WARN_ON(ffa_host_unshare_ranges(reg->constituents,
					reg->addr_range_cnt));
out_unlock:
	hyp_spin_unlock(&host_buffers.lock);

	if (ret)
		ffa_to_smccc_res(res, ret);
}

/*
 * Is a given FFA function supported, either by forwarding on directly
 * or by handling at EL2?
 */
static bool ffa_call_supported(u64 func_id)
{
	switch (func_id) {
	/* Unsupported memory management calls */
	case FFA_FN64_MEM_RETRIEVE_REQ:
	case FFA_MEM_RETRIEVE_RESP:
	case FFA_MEM_RELINQUISH:
	case FFA_MEM_OP_PAUSE:
	case FFA_MEM_OP_RESUME:
	case FFA_MEM_FRAG_RX:
	case FFA_FN64_MEM_DONATE:
	/* Indirect message passing via RX/TX buffers */
	case FFA_MSG_SEND:
	case FFA_MSG_POLL:
	case FFA_MSG_WAIT:
	/* 32-bit variants of 64-bit calls */
	case FFA_MSG_SEND_DIRECT_REQ:
	case FFA_MSG_SEND_DIRECT_RESP:
	case FFA_RXTX_MAP:
	case FFA_MEM_DONATE:
	case FFA_MEM_RETRIEVE_REQ:
		return false;
	}

	return true;
}

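/*
 * Handle FFA_FEATURES queries from the host. Calls that the proxy
 * blocks outright are reported as NOT_SUPPORTED; for the share/lend
 * calls we interpose, we answer ourselves and advertise no support for
 * dynamically allocated buffers. Anything else is forwarded to EL3 by
 * returning false.
 */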
static bool do_ffa_features(struct arm_smccc_res *res,
			    struct kvm_cpu_context *ctxt)
{
	DECLARE_REG(u32, id, ctxt, 1);
	u64 prop = 0;
	int ret = 0;

	if (!ffa_call_supported(id)) {
		ret = FFA_RET_NOT_SUPPORTED;
		goto out_handled;
	}

	switch (id) {
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		ret = FFA_RET_SUCCESS;
		prop = 0; /* No support for dynamic buffers */
		goto out_handled;
	default:
		return false;
	}

out_handled:
	ffa_to_smccc_res_prop(res, ret, prop);
	return true;
}

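/*
 * Entry point from the host's SMC trap handler. Returns true if the
 * call was handled here (with the return values already written back
 * into the host context), or false if it should be forwarded on to EL3
 * unmodified.
 */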
bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
	struct arm_smccc_res res;

	/*
	 * There's no way we can tell what a non-standard SMC call might
	 * be up to. Ideally, we would terminate these here and return
	 * an error to the host, but sadly devices make use of custom
	 * firmware calls for things like power management, debugging,
	 * RNG access and crash reporting.
	 *
	 * Given that the architecture requires us to trust EL3 anyway,
	 * we forward unrecognised calls on under the assumption that
	 * the firmware doesn't expose a mechanism to access arbitrary
	 * non-secure memory. Short of a per-device table of SMCs, this
	 * is the best we can do.
	 */
	if (!is_ffa_call(func_id))
		return false;

	switch (func_id) {
	case FFA_FEATURES:
		if (!do_ffa_features(&res, host_ctxt))
			return false;
		goto out_handled;
	/* Memory management */
	case FFA_FN64_RXTX_MAP:
		do_ffa_rxtx_map(&res, host_ctxt);
		goto out_handled;
	case FFA_RXTX_UNMAP:
		do_ffa_rxtx_unmap(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_SHARE:
	case FFA_FN64_MEM_SHARE:
		do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_RECLAIM:
		do_ffa_mem_reclaim(&res, host_ctxt);
		goto out_handled;
	case FFA_MEM_LEND:
	case FFA_FN64_MEM_LEND:
		do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
		goto out_handled;
	case FFA_MEM_FRAG_TX:
		do_ffa_mem_frag_tx(&res, host_ctxt);
		goto out_handled;
	}

	if (ffa_call_supported(func_id))
		return false; /* Pass through */

	ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
out_handled:
	ffa_set_retval(host_ctxt, &res);
	return true;
}

int hyp_ffa_init(void *pages)
{
	struct arm_smccc_res res;
	size_t min_rxtx_sz;
	void *tx, *rx;

	if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
		return 0;

	arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 == FFA_RET_NOT_SUPPORTED)
		return 0;

	/*
	 * Firmware returns the maximum supported version of the FF-A
	 * implementation. Check that the returned version is
	 * backwards-compatible with the hyp according to the rules in DEN0077A
	 * v1.1 REL0 13.2.1.
	 *
	 * Of course, things are never simple when dealing with firmware. v1.1
	 * broke ABI with v1.0 on several structures, which is itself
	 * incompatible with the aforementioned versioning scheme. The
	 * expectation is that v1.x implementations that do not support the v1.0
	 * ABI return NOT_SUPPORTED rather than a version number, according to
	 * DEN0077A v1.1 REL0 18.6.4.
	 */
	if (FFA_MAJOR_VERSION(res.a0) != 1)
		return -EOPNOTSUPP;

	arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	if (res.a2 != HOST_FFA_ID)
		return -EINVAL;

	arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
			  0, 0, 0, 0, 0, 0, &res);
	if (res.a0 != FFA_SUCCESS)
		return -EOPNOTSUPP;

	switch (res.a2) {
	case FFA_FEAT_RXTX_MIN_SZ_4K:
		min_rxtx_sz = SZ_4K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_16K:
		min_rxtx_sz = SZ_16K;
		break;
	case FFA_FEAT_RXTX_MIN_SZ_64K:
		min_rxtx_sz = SZ_64K;
		break;
	default:
		return -EINVAL;
	}

	if (min_rxtx_sz > PAGE_SIZE)
		return -EOPNOTSUPP;

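	/*
	 * Carve up the pages handed to us at init: the first
	 * 2 * KVM_FFA_MBOX_NR_PAGES back the hypervisor's TX and RX
	 * mailboxes, and whatever remains of hyp_ffa_proxy_pages() is
	 * used as scratch space for reassembling fragmented descriptors
	 * on the reclaim path.
	 */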
	tx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
	rx = pages;
	pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;

	ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
		.buf	= pages,
		.len	= PAGE_SIZE *
			  (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
	};

	hyp_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
		.tx	= tx,
		.rx	= rx,
	};

	host_buffers = (struct kvm_ffa_buffers) {
		.lock	= __HYP_SPIN_LOCK_UNLOCKED,
	};

	return 0;
}
