feat(indirect message): add FFA_RXTX_MAP forwarding to SPMC
The hypervisor must forward the FFA_RXTX_MAP ABI to the SPMC, so that
the SPMC can map the VM's RX/TX buffers in its stage 1 translation
regime.
Mailbox forwarding is needed for indirect messaging with SPs: the SPMC
is in charge of message delivery whenever one of the two endpoints is
an SP.
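For reviewers, a minimal sketch of the forwarding convention used by
this change (names are taken from the patch itself; the wrapper
function below is hypothetical and for illustration only): the
hypervisor fills an endpoint RX/TX descriptor in its own TX buffer,
which is the SPMC's RX buffer, and then issues FFA_RXTX_MAP with the
register arguments zeroed so the SPMC reads the addresses from the
descriptor instead.

    /*
     * Hypothetical helper, illustration only; relies on the Hafnium
     * vm/ffa definitions already used elsewhere in this patch.
     */
    static struct ffa_value forward_rxtx_map_sketch(struct vm *vm,
                                                    struct vm *other_world)
    {
            /*
             * Describe the VM's buffers in the hypervisor's TX buffer
             * (the SPMC's RX buffer).
             */
            ffa_endpoint_rx_tx_descriptor_init(
                    (struct ffa_endpoint_rx_tx_descriptor *)
                            other_world->mailbox.recv,
                    vm->id, (uintptr_t)vm->mailbox.recv,
                    (uintptr_t)vm->mailbox.send);

            /*
             * Zeroed register arguments tell the SPMC to take the
             * buffer addresses from the descriptor.
             */
            return arch_other_world_call((struct ffa_value){
                    .func = FFA_RXTX_MAP_64,
                    .arg1 = 0,
                    .arg2 = 0,
                    .arg3 = 0});
    }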
Change-Id: I3714a7203cb23cc3af7e613f3eb76c14648790ff
Signed-off-by: Federico Recanati <federico.recanati@arm.com>
diff --git a/src/api.c b/src/api.c
index 8ce12c0..07b23b1 100644
--- a/src/api.c
+++ b/src/api.c
@@ -1103,8 +1103,8 @@
paddr_t pa_send_end;
paddr_t pa_recv_begin;
paddr_t pa_recv_end;
- uint32_t orig_send_mode;
- uint32_t orig_recv_mode;
+ uint32_t orig_send_mode = 0;
+ uint32_t orig_recv_mode = 0;
uint32_t extra_attributes;
/* We only allow these to be setup once. */
@@ -1138,51 +1138,63 @@
goto out;
}
- /*
- * Ensure the pages are valid, owned and exclusive to the VM and that
- * the VM has the required access to the memory.
- */
- if (!vm_mem_get_mode(vm_locked, send, ipa_add(send, PAGE_SIZE),
- &orig_send_mode) ||
- !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
- (orig_send_mode & MM_MODE_R) == 0 ||
- (orig_send_mode & MM_MODE_W) == 0) {
- ret = ffa_error(FFA_INVALID_PARAMETERS);
- goto out;
- }
+ /* Set stage 2 translation tables only for virtual FF-A instances. */
+ if (vm_id_is_current_world(vm_locked.vm->id)) {
+ /*
+ * Ensure the pages are valid, owned and exclusive to the VM and
+ * that the VM has the required access to the memory.
+ */
+ if (!vm_mem_get_mode(vm_locked, send, ipa_add(send, PAGE_SIZE),
+ &orig_send_mode) ||
+ !api_mode_valid_owned_and_exclusive(orig_send_mode) ||
+ (orig_send_mode & MM_MODE_R) == 0 ||
+ (orig_send_mode & MM_MODE_W) == 0) {
+ dlog_error(
+ "VM doesn't have required access rights to map "
+ "TX buffer in stage 2.\n");
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
+ }
- if (!vm_mem_get_mode(vm_locked, recv, ipa_add(recv, PAGE_SIZE),
- &orig_recv_mode) ||
- !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
- (orig_recv_mode & MM_MODE_R) == 0) {
- ret = ffa_error(FFA_INVALID_PARAMETERS);
- goto out;
- }
+ if (!vm_mem_get_mode(vm_locked, recv, ipa_add(recv, PAGE_SIZE),
+ &orig_recv_mode) ||
+ !api_mode_valid_owned_and_exclusive(orig_recv_mode) ||
+ (orig_recv_mode & MM_MODE_R) == 0) {
+ dlog_error(
+ "VM doesn't have required access rights to map "
+ "RX buffer in stage 2.\n");
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
+ }
- /* Take memory ownership away from the VM and mark as shared. */
- uint32_t mode =
- MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R | MM_MODE_W;
- if (vm_locked.vm->el0_partition) {
- mode |= MM_MODE_USER | MM_MODE_NG;
- }
+ /* Take memory ownership away from the VM and mark as shared. */
+ uint32_t mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R |
+ MM_MODE_W;
+ if (vm_locked.vm->el0_partition) {
+ mode |= MM_MODE_USER | MM_MODE_NG;
+ }
- if (!vm_identity_map(vm_locked, pa_send_begin, pa_send_end, mode,
- local_page_pool, NULL)) {
- ret = ffa_error(FFA_NO_MEMORY);
- goto out;
- }
+ if (!vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
+ mode, local_page_pool, NULL)) {
+ dlog_error(
+ "Cannot allocate a new entry in stage 2 "
+ "translation table.\n");
+ ret = ffa_error(FFA_NO_MEMORY);
+ goto out;
+ }
- mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R;
- if (vm_locked.vm->el0_partition) {
- mode |= MM_MODE_USER | MM_MODE_NG;
- }
+ mode = MM_MODE_UNOWNED | MM_MODE_SHARED | MM_MODE_R;
+ if (vm_locked.vm->el0_partition) {
+ mode |= MM_MODE_USER | MM_MODE_NG;
+ }
- if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end, mode,
- local_page_pool, NULL)) {
- /* TODO: partial defrag of failed range. */
- /* Recover any memory consumed in failed mapping. */
- vm_ptable_defrag(vm_locked, local_page_pool);
- goto fail_undo_send;
+ if (!vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
+ mode, local_page_pool, NULL)) {
+ /* TODO: partial defrag of failed range. */
+ /* Recover any memory consumed in failed mapping. */
+ vm_ptable_defrag(vm_locked, local_page_pool);
+ goto fail_undo_send;
+ }
}
/* Get extra send/recv pages mapping attributes for the given VM ID. */
@@ -1214,7 +1226,7 @@
fail_undo_send_and_recv:
CHECK(vm_identity_map(vm_locked, pa_recv_begin, pa_recv_end,
- orig_send_mode, local_page_pool, NULL));
+ orig_recv_mode, local_page_pool, NULL));
fail_undo_send:
CHECK(vm_identity_map(vm_locked, pa_send_begin, pa_send_end,
@@ -1225,6 +1237,35 @@
return ret;
}
+static void api_get_rxtx_description(struct vm_locked vm_locked, ipaddr_t *send,
+ ipaddr_t *recv, uint32_t *page_count,
+ ffa_vm_id_t *owner_vm_id)
+{
+ /*
+ * If the message has been forwarded the effective addresses are in
+ * hypervisor's TX buffer.
+ */
+ bool forwarded = (vm_locked.vm->id == HF_OTHER_WORLD_ID) &&
+ (ipa_addr(*send) == 0) && (ipa_addr(*recv) == 0) &&
+ (*page_count == 0);
+
+ if (forwarded) {
+ struct ffa_endpoint_rx_tx_descriptor *endpoint_desc =
+ (struct ffa_endpoint_rx_tx_descriptor *)
+ vm_locked.vm->mailbox.send;
+ struct ffa_composite_memory_region *rx_region =
+ ffa_enpoint_get_rx_memory_region(endpoint_desc);
+ struct ffa_composite_memory_region *tx_region =
+ ffa_enpoint_get_tx_memory_region(endpoint_desc);
+
+ *owner_vm_id = endpoint_desc->endpoint_id;
+ *recv = ipa_init(rx_region->constituents[0].address);
+ *send = ipa_init(tx_region->constituents[0].address);
+ *page_count = rx_region->constituents[0].page_count;
+ } else {
+ *owner_vm_id = vm_locked.vm->id;
+ }
+}
/**
* Configures the VM to send/receive data through the specified pages. The pages
* must not be shared. Locking of the page tables combined with a local memory
@@ -1247,8 +1288,26 @@
struct vm *vm = current->vm;
struct ffa_value ret;
struct vm_locked vm_locked;
+ struct vm_locked owner_vm_locked;
struct mm_stage1_locked mm_stage1_locked;
struct mpool local_page_pool;
+ ffa_vm_id_t owner_vm_id;
+
+ vm_locked = vm_lock(vm);
+ /*
+ * Get the original buffer addresses and VM ID in case of a
+ * forwarded message.
+ */
+ api_get_rxtx_description(vm_locked, &send, &recv, &page_count,
+ &owner_vm_id);
+ vm_unlock(&vm_locked);
+
+ owner_vm_locked = plat_ffa_vm_find_locked_create(owner_vm_id);
+ if (owner_vm_locked.vm == NULL) {
+ dlog_error("Cannot map RX/TX for VM ID %#x, not found.\n",
+ owner_vm_id);
+ return ffa_error(FFA_DENIED);
+ }
/*
* Create a local pool so any freed memory can't be used by another
@@ -1257,22 +1316,23 @@
*/
mpool_init_with_fallback(&local_page_pool, &api_page_pool);
- vm_locked = vm_lock(vm);
mm_stage1_locked = mm_lock_stage1();
- ret = api_vm_configure_pages(mm_stage1_locked, vm_locked, send, recv,
- page_count, &local_page_pool);
+ ret = api_vm_configure_pages(mm_stage1_locked, owner_vm_locked, send,
+ recv, page_count, &local_page_pool);
if (ret.func != FFA_SUCCESS_32) {
goto exit;
}
+ /* Forward buffer mapping to SPMC if coming from a VM. */
+ plat_ffa_rxtx_map_forward(owner_vm_locked);
+
ret = (struct ffa_value){.func = FFA_SUCCESS_32};
exit:
mpool_fini(&local_page_pool);
-
mm_unlock_stage1(&mm_stage1_locked);
- vm_unlock(&vm_locked);
+ vm_unlock(&owner_vm_locked);
return ret;
}
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 64adfef..8ee65ca 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -956,7 +956,8 @@
*/
uint32_t arch_mm_extra_attributes_from_vm(ffa_vm_id_t id)
{
- return (id == HF_HYPERVISOR_VM_ID) ? MM_MODE_NS : 0;
+ return ((id & HF_VM_ID_WORLD_MASK) == HF_HYPERVISOR_VM_ID) ? MM_MODE_NS
+ : 0;
}
/**
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index b7ce7df..c8e7d60 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -99,6 +99,11 @@
return false;
}
+void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+{
+ (void)vm_locked;
+}
+
bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)
@@ -236,6 +241,12 @@
return (struct vm_locked){.vm = NULL};
}
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+{
+ (void)vm_id;
+ return (struct vm_locked){.vm = NULL};
+}
+
bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
{
(void)vm_id;
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 7a7ec5e..c64e0c6 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -62,11 +62,22 @@
dlog_info("Initializing Hafnium (Hypervisor)\n");
}
+static void plat_ffa_rxtx_map_spmc(paddr_t recv, paddr_t send,
+ uint64_t page_count)
+{
+ struct ffa_value ret;
+
+ ret = arch_other_world_call((struct ffa_value){.func = FFA_RXTX_MAP_64,
+ .arg1 = pa_addr(recv),
+ .arg2 = pa_addr(send),
+ .arg3 = page_count});
+ CHECK(ret.func == FFA_SUCCESS_32);
+}
+
void plat_ffa_init(bool tee_enabled)
{
struct vm *other_world_vm = vm_find(HF_OTHER_WORLD_ID);
struct ffa_value ret;
- uint32_t func;
if (!tee_enabled) {
return;
@@ -97,27 +108,10 @@
* perspective and vice-versa.
*/
dlog_verbose("Setting up buffers for TEE.\n");
- ret = arch_other_world_call((struct ffa_value){
- .func = FFA_RXTX_MAP_64,
- .arg1 = pa_addr(
- pa_from_va(va_from_ptr(other_world_vm->mailbox.recv))),
- .arg2 = pa_addr(
- pa_from_va(va_from_ptr(other_world_vm->mailbox.send))),
- .arg3 = HF_MAILBOX_SIZE / FFA_PAGE_SIZE});
- func = ret.func & ~SMCCC_CONVENTION_MASK;
- if (ret.func == SMCCC_ERROR_UNKNOWN) {
- dlog_error(
- "Unknown function setting up TEE message buffers. "
- "Memory sharing with TEE will not work.\n");
- return;
- }
- if (func == FFA_ERROR_32) {
- panic("Error %d setting up TEE message buffers.", ret.arg2);
- } else if (func != FFA_SUCCESS_32) {
- panic("Unexpected function %#x returned setting up TEE message "
- "buffers.",
- ret.func);
- }
+ plat_ffa_rxtx_map_spmc(
+ pa_from_va(va_from_ptr(other_world_vm->mailbox.recv)),
+ pa_from_va(va_from_ptr(other_world_vm->mailbox.send)),
+ HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
ffa_tee_enabled = true;
@@ -420,6 +414,11 @@
return (struct vm_locked){.vm = NULL};
}
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+{
+ return plat_ffa_vm_find_locked(vm_id);
+}
+
bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
{
return vm_id_is_current_world(vm_id);
@@ -536,6 +535,33 @@
return false;
}
+void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+{
+ struct vm *vm = vm_locked.vm;
+ struct vm *other_world;
+
+ if (!ffa_tee_enabled) {
+ return;
+ }
+
+ if (vm->ffa_version < MAKE_FFA_VERSION(1, 1)) {
+ return;
+ }
+
+ /* The hypervisor always forwards the call to the SPMC. */
+
+ other_world = vm_find(HF_OTHER_WORLD_ID);
+
+ /* Fill the endpoint RX/TX descriptor in the SPMC's RX buffer. */
+ ffa_endpoint_rx_tx_descriptor_init(
+ (struct ffa_endpoint_rx_tx_descriptor *)
+ other_world->mailbox.recv,
+ vm->id, (uintptr_t)vm->mailbox.recv,
+ (uintptr_t)vm->mailbox.send);
+
+ plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
+}
+
bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
{
(void)current;
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index e6555b1..2ba25d4 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -410,6 +410,11 @@
return false;
}
+void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+{
+ (void)vm_locked;
+}
+
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
ffa_vm_id_t receiver_id, uint32_t flags)
{
@@ -519,6 +524,15 @@
return to_ret_locked;
}
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+{
+ if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
+ return vm_find_locked(vm_id);
+ }
+
+ return plat_ffa_nwd_vm_create(vm_id);
+}
+
struct ffa_value plat_ffa_notifications_bitmap_create(
ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index f4bbd36..ab84b72 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -137,6 +137,11 @@
return false;
}
+void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+{
+ (void)vm_locked;
+}
+
ffa_partition_properties_t plat_ffa_partition_properties(
ffa_vm_id_t vm_id, const struct vm *target)
{
@@ -229,6 +234,12 @@
return (struct vm_locked){.vm = NULL};
}
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+{
+ (void)vm_id;
+ return (struct vm_locked){.vm = NULL};
+}
+
bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
{
(void)vm_id;