feat(indirect message): add FFA_RXTX_UNMAP forwarding to SPMC
FFA_RXTX_UNMAP has to be forwarded to the SPMC so that it can unmap a
VM's buffers that were previously mapped through FFA_RXTX_MAP
forwarding.
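
The new hook implemented below is assumed to be declared in the shared
plat FF-A interface header (the declaration is not part of this diff);
a minimal sketch:

  /*
   * Forward FFA_RXTX_UNMAP for the given VM's buffers to the SPMC.
   * No-op at the SPMC itself and when no SPMC/TEE is present.
   */
  void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id);
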
Change-Id: Ieb8dc04cc61b197023da0f5aa3612cf5a649d9c8
Signed-off-by: Federico Recanati <federico.recanati@arm.com>
diff --git a/src/api.c b/src/api.c
index 07b23b1..54988fc 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1354,20 +1354,28 @@
{
struct vm *vm = current->vm;
struct vm_locked vm_locked;
+ ffa_vm_id_t owner_vm_id;
struct mm_stage1_locked mm_stage1_locked;
paddr_t send_pa_begin;
paddr_t send_pa_end;
paddr_t recv_pa_begin;
paddr_t recv_pa_end;
+ struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
- /*
- * Check there is a buffer pair registered on behalf of the caller.
- * Since forwarding is not yet supported the allocator ID MBZ.
- */
- if (allocator_id != 0) {
- dlog_error(
- "Forwarding MAP/UNMAP from the hypervisor is not yet "
- "supported so vm id must be zero.\n");
+ /* Ensure `allocator_id` is only set at the non-secure physical FF-A instance. */
+ if (vm_id_is_current_world(vm->id) && (allocator_id != 0)) {
+ dlog_error("`allocator_id` must be 0 at virtual instances.\n");
+ return ffa_error(FFA_INVALID_PARAMETERS);
+ }
+
+ /* ID of the VM whose buffers have to be unmapped. */
+ owner_vm_id = (allocator_id != 0) ? allocator_id : vm->id;
+
+ vm_locked = plat_ffa_vm_find_locked(owner_vm_id);
+ vm = vm_locked.vm;
+ if (vm == NULL) {
+ dlog_error("Cannot unmap RX/TX for VM ID %#x, not found.\n",
+ owner_vm_id);
return ffa_error(FFA_INVALID_PARAMETERS);
}
@@ -1375,7 +1383,8 @@
if (vm->mailbox.send == NULL || vm->mailbox.recv == NULL) {
dlog_verbose(
"No buffer pair registered on behalf of the caller.\n");
- return ffa_error(FFA_INVALID_PARAMETERS);
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
}
/* Currently a mailbox size of 1 page is assumed. */
@@ -1384,21 +1393,23 @@
recv_pa_begin = pa_from_va(va_from_ptr(vm->mailbox.recv));
recv_pa_end = pa_add(recv_pa_begin, HF_MAILBOX_SIZE);
- vm_locked = vm_lock(vm);
mm_stage1_locked = mm_lock_stage1();
- /*
- * Set the memory region of the buffers back to the default mode
- * for the VM. Since this memory region was already mapped for the
- * RXTX buffers we can safely remap them.
- */
- CHECK(vm_identity_map(vm_locked, send_pa_begin, send_pa_end,
- MM_MODE_R | MM_MODE_W | MM_MODE_X, &api_page_pool,
- NULL));
+ /* Reset stage 2 mapping only for virtual FF-A instances. */
+ if (vm_id_is_current_world(owner_vm_id)) {
+ /*
+ * Set the memory region of the buffers back to the default mode
+ * for the VM. Since this memory region was already mapped for
+ * the RXTX buffers we can safely remap them.
+ */
+ CHECK(vm_identity_map(vm_locked, send_pa_begin, send_pa_end,
+ MM_MODE_R | MM_MODE_W | MM_MODE_X,
+ &api_page_pool, NULL));
- CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
- MM_MODE_R | MM_MODE_W | MM_MODE_X, &api_page_pool,
- NULL));
+ CHECK(vm_identity_map(vm_locked, recv_pa_begin, recv_pa_end,
+ MM_MODE_R | MM_MODE_W | MM_MODE_X,
+ &api_page_pool, NULL));
+ }
/* Unmap the buffers in the partition manager. */
CHECK(mm_unmap(mm_stage1_locked, send_pa_begin, send_pa_end,
@@ -1409,10 +1420,15 @@
vm->mailbox.send = NULL;
vm->mailbox.recv = NULL;
+ /* Forward the buffer unmapping to the SPMC (no-op at the SPMC itself). */
+ plat_ffa_rxtx_unmap_forward(owner_vm_id);
+
mm_unlock_stage1(&mm_stage1_locked);
+
+out:
vm_unlock(&vm_locked);
- return (struct ffa_value){.func = FFA_SUCCESS_32};
+ return ret;
}
/**
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index c8e7d60..1a759f1 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -104,6 +104,11 @@
(void)vm_locked;
}
+void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+{
+ (void)id;
+}
+
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index c64e0c6..6dcb117 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -562,6 +562,30 @@
plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
}
+void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+{
+ struct ffa_value ret;
+ uint64_t func;
+
+ if (!ffa_tee_enabled) {
+ return;
+ }
+
+ /* Hypervisor always forwards RXTX_UNMAP to the SPMC; VM ID in w1[31:16]. */
+ ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_RXTX_UNMAP_32, .arg1 = (uint64_t)id << 16});
+ func = ret.func & ~SMCCC_CONVENTION_MASK;
+ if (ret.func == SMCCC_ERROR_UNKNOWN) {
+ panic("Unknown error forwarding RXTX_UNMAP.\n");
+ } else if (func == FFA_ERROR_32) {
+ panic("Error %d forwarding RXTX_UNMAP.\n", (int)ret.arg2);
+ } else if (func != FFA_SUCCESS_32) {
+ panic("Unexpected function %#x returned when forwarding "
+ "RXTX_UNMAP.\n",
+ ret.func);
+ }
+}
+
bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
{
(void)current;
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 2ba25d4..7cfbbd9 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -415,6 +415,11 @@
(void)vm_locked;
}
+void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+{
+ (void)id;
+}
+
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
ffa_vm_id_t receiver_id, uint32_t flags)
{
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index ab84b72..830544a 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -79,6 +79,11 @@
return false;
}
+void plat_ffa_rxtx_unmap_forward(ffa_vm_id_t id)
+{
+ (void)id;
+}
+
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)