feat(memory share): continue memory retrieve between worlds
Hypervisor can forward retrieve requests to the SPMC in two cases:
- For the retrieve request issued by the hypervisor itself, as part of
its handling of FFA_MEM_RECLAIM: obtain the full memory descriptor,
validate it and conclude the operation.
- For the retrieve request from a NWd VM borrower.
The SPMC distinguishes between the two cases when the current VM is the
`other_world_vm`. In the first case the retrieve request follows a
specific format, and its response needs to be fragmented.
In this case the SPMC increments the number of fragments retrieved
by the hypervisor. When handling FFA_MEM_FRAG_RX, if the count
of fragments retrieved by the hypervisor is not zero, the SPMC
continues the retrieve operation from the hypervisor; otherwise it
continues the retrieve request from the NWd VM.
Change-Id: I4a7ec597d6629fadbc16731037c6015023b19359
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 658ed7c..461fdd3 100644
--- a/src/api.c
+++ b/src/api.c
@@ -3009,7 +3009,7 @@
* Can't retrieve memory information if the mailbox is not
* available.
*/
- dlog_verbose("RX buffer not ready.\n");
+ dlog_verbose("%s: RX buffer not ready.\n", __func__);
ret = ffa_error(FFA_BUSY);
goto out;
}
@@ -3117,8 +3117,11 @@
struct ffa_value ret;
/* Sender ID MBZ at virtual instance. */
- if (sender_vm_id != 0) {
- return ffa_error(FFA_INVALID_PARAMETERS);
+ if (vm_id_is_current_world(to->id)) {
+ if (sender_vm_id != 0) {
+ dlog_verbose("%s: Invalid sender.\n", __func__);
+ return ffa_error(FFA_INVALID_PARAMETERS);
+ }
}
to_locked = vm_lock(to);
@@ -3128,14 +3131,14 @@
* Can't retrieve memory information if the mailbox is not
* available.
*/
- dlog_verbose("RX buffer not ready.\n");
+ dlog_verbose("%s: RX buffer not ready partition %x.\n",
+ __func__, to_locked.vm->id);
ret = ffa_error(FFA_BUSY);
goto out;
}
ret = ffa_memory_retrieve_continue(to_locked, handle, fragment_offset,
- &api_page_pool);
-
+ sender_vm_id, &api_page_pool);
out:
vm_unlock(&to_locked);
return ret;
@@ -3168,6 +3171,7 @@
sl_unlock(&from->lock);
if (from_msg == NULL) {
+ dlog_verbose("Mailbox from %x is not set.\n", from->id);
return ffa_error(FFA_INVALID_PARAMETERS);
}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 3ff5103..6d847cf 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -1414,6 +1414,7 @@
uint32_t length;
uint32_t fragment_length;
uint32_t fragment_offset;
+ struct ffa_memory_region *retrieved;
CHECK(request_length <= HF_MAILBOX_SIZE);
CHECK(from_locked.vm->id == HF_OTHER_WORLD_ID);
@@ -1453,14 +1454,27 @@
sizeof(other_world_retrieve_buffer),
from_locked.vm->mailbox.send, fragment_length);
+ retrieved = (struct ffa_memory_region *)other_world_retrieve_buffer;
+
+ /* Hypervisor always forwards a VM's RX_RELEASE to the SPMC. */
+ other_world_ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_RX_RELEASE_32, .arg1 = HF_HYPERVISOR_VM_ID});
+ assert(other_world_ret.func == FFA_SUCCESS_32);
+
/* Fetch the remaining fragments into the same buffer. */
fragment_offset = fragment_length;
while (fragment_offset < length) {
- other_world_ret = arch_other_world_call(
- (struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
- .arg1 = (uint32_t)handle,
- .arg2 = (uint32_t)(handle >> 32),
- .arg3 = fragment_offset});
+ /*
+ * Request the next fragment, providing the sender ID,
+ * which is expected to be part of FFA_MEM_FRAG_RX_32 at
+ * the physical FF-A instance.
+ */
+ other_world_ret = arch_other_world_call((struct ffa_value){
+ .func = FFA_MEM_FRAG_RX_32,
+ .arg1 = (uint32_t)handle,
+ .arg2 = (uint32_t)(handle >> 32),
+ .arg3 = fragment_offset,
+ .arg4 = (uint32_t)retrieved->sender << 16});
if (other_world_ret.func != FFA_MEM_FRAG_TX_32) {
dlog_verbose(
"Got %#x (%d) from other world in response to "
@@ -1498,6 +1512,10 @@
from_locked.vm->mailbox.send, fragment_length);
fragment_offset += fragment_length;
+ other_world_ret = arch_other_world_call(
+ (struct ffa_value){.func = FFA_RX_RELEASE_32,
+ .arg1 = HF_HYPERVISOR_VM_ID});
+ assert(other_world_ret.func == FFA_SUCCESS_32);
}
*memory_region =
@@ -1534,7 +1552,6 @@
}
assert(memory_region != NULL);
-
if (memory_region->receiver_count != 1) {
/* Only one receiver supported by Hafnium for now. */
dlog_verbose(
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index c95379c..774239f 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -584,8 +584,11 @@
* with `state_mask`, as a result of the function
* `ffa_send_check_transition`.
*/
- assert((orig_to_mode & (MM_MODE_INVALID | MM_MODE_UNOWNED |
- MM_MODE_SHARED)) != 0U);
+ if (vm_id_is_current_world(to.vm->id)) {
+ assert((orig_to_mode &
+ (MM_MODE_INVALID | MM_MODE_UNOWNED |
+ MM_MODE_SHARED)) != 0U);
+ }
} else {
/*
* If the retriever is from virtual FF-A instance:
@@ -1951,7 +1954,7 @@
dlog_verbose(
"Incorrect receiver VM ID %d for "
"FFA_MEM_RETRIEVE_REQ, for handle %#x.\n",
- receiver_id, handle);
+ receiver_id, memory_region->handle);
return ffa_error(FFA_INVALID_PARAMETERS);
}
@@ -2214,7 +2217,6 @@
ret = (struct ffa_value){.func = FFA_MEM_RETRIEVE_RESP_32,
.arg1 = total_length,
.arg2 = fragment_length};
-
out:
share_states_unlock(&share_states);
dump_share_states();
@@ -2224,6 +2226,7 @@
struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
ffa_memory_handle_t handle,
uint32_t fragment_offset,
+ ffa_vm_id_t sender_vm_id,
struct mpool *page_pool)
{
struct ffa_memory_region *memory_region;
@@ -2237,6 +2240,7 @@
uint32_t remaining_constituent_count;
uint32_t fragment_length;
uint32_t receiver_index;
+ bool continue_ffa_hyp_mem_retrieve_req;
dump_share_states();
@@ -2251,18 +2255,6 @@
memory_region = share_state->memory_region;
CHECK(memory_region != NULL);
- receiver_index =
- ffa_memory_region_get_receiver(memory_region, to_locked.vm->id);
-
- if (receiver_index == memory_region->receiver_count) {
- dlog_verbose(
- "Caller of FFA_MEM_FRAG_RX (%x) is not a borrower to "
- "memory sharing transaction (%x)\n",
- to_locked.vm->id, handle);
- ret = ffa_error(FFA_INVALID_PARAMETERS);
- goto out;
- }
-
if (!share_state->sending_complete) {
dlog_verbose(
"Memory with handle %#x not fully sent, can't "
@@ -2272,20 +2264,72 @@
goto out;
}
- if (share_state->retrieved_fragment_count[receiver_index] == 0 ||
- share_state->retrieved_fragment_count[receiver_index] >=
- share_state->fragment_count) {
- dlog_verbose(
- "Retrieval of memory with handle %#x not yet started "
- "or already completed (%d/%d fragments retrieved).\n",
- handle,
- share_state->retrieved_fragment_count[receiver_index],
- share_state->fragment_count);
- ret = ffa_error(FFA_INVALID_PARAMETERS);
- goto out;
- }
+ /*
+ * If retrieve request from the hypervisor has been initiated in the
+ * given share_state, continue it, else assume it is a continuation of
+ * retrieve request from a NWd VM.
+ */
+ continue_ffa_hyp_mem_retrieve_req =
+ (to_locked.vm->id == HF_HYPERVISOR_VM_ID) &&
+ (share_state->hypervisor_fragment_count != 0U) &&
+ plat_ffa_is_vm_id(sender_vm_id);
- fragment_index = share_state->retrieved_fragment_count[receiver_index];
+ if (!continue_ffa_hyp_mem_retrieve_req) {
+ receiver_index = ffa_memory_region_get_receiver(
+ memory_region, to_locked.vm->id);
+
+ if (receiver_index == memory_region->receiver_count) {
+ dlog_verbose(
+ "Caller of FFA_MEM_FRAG_RX (%x) is not a "
+ "borrower to memory sharing transaction (%x)\n",
+ to_locked.vm->id, handle);
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
+ }
+
+ if (share_state->retrieved_fragment_count[receiver_index] ==
+ 0 ||
+ share_state->retrieved_fragment_count[receiver_index] >=
+ share_state->fragment_count) {
+ dlog_verbose(
+ "Retrieval of memory with handle %#x not yet "
+ "started or already completed (%d/%d fragments "
+ "retrieved).\n",
+ handle,
+ share_state->retrieved_fragment_count
+ [receiver_index],
+ share_state->fragment_count);
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
+ }
+
+ fragment_index =
+ share_state->retrieved_fragment_count[receiver_index];
+ } else {
+ if (share_state->hypervisor_fragment_count == 0 ||
+ share_state->hypervisor_fragment_count >=
+ share_state->fragment_count) {
+ dlog_verbose(
+ "Retrieve of memory with handle %x not "
+ "started from hypervisor.\n",
+ handle);
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
+ }
+
+ if (memory_region->sender != sender_vm_id) {
+ dlog_verbose(
+ "Sender ID (%x) is not as expected for memory "
+ "handle %x\n",
+ sender_vm_id, handle);
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out;
+ }
+
+ fragment_index = share_state->hypervisor_fragment_count;
+
+ receiver_index = 0;
+ }
/*
* Check that the given fragment offset is correct by counting
@@ -2313,6 +2357,9 @@
goto out;
}
+ /* VMs acquire the RX buffer from SPMC. */
+ CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
+
remaining_constituent_count = ffa_memory_fragment_init(
to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE,
share_state->fragments[fragment_index],
@@ -2323,13 +2370,19 @@
to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
- share_state->retrieved_fragment_count[receiver_index]++;
- if (share_state->retrieved_fragment_count[receiver_index] ==
- share_state->fragment_count) {
- ffa_memory_retrieve_complete(share_states, share_state,
- page_pool);
- }
+ if (!continue_ffa_hyp_mem_retrieve_req) {
+ share_state->retrieved_fragment_count[receiver_index]++;
+ if (share_state->retrieved_fragment_count[receiver_index] ==
+ share_state->fragment_count) {
+ ffa_memory_retrieve_complete(share_states, share_state,
+ page_pool);
+ }
+ } else {
+ share_state->hypervisor_fragment_count++;
+
+ ffa_memory_retrieve_complete_from_hyp(share_state);
+ }
ret = (struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
.arg1 = (uint32_t)handle,
.arg2 = (uint32_t)(handle >> 32),