diff options
author | J-Alves <joao.alves@arm.com> | 2022-07-19 13:16:31 +0100 |
---|---|---|
committer | Olivier Deprez <olivier.deprez@arm.com> | 2023-01-18 14:41:09 +0100 |
commit | fdd29277caf2088553231fec89629b13cf7a24e3 (patch) | |
tree | 45aec0e75892e2302f767c16f8491a3c124a454f | |
parent | b5084cf6f0235c86e8c4e1599b3f26ad299a59ad (diff) | |
download | hafnium-fdd29277caf2088553231fec89629b13cf7a24e3.tar.gz |
refactor(memory share): continue memory sharing
Continue memory sharing operation between NWd and SWd.
If the handle is allocated by the SPMC, the hypervisor forwards the
FFA_MEM_FRAG_TX to the SWd.
The SPMC can only handle calls to the FFA_MEM_FRAG_TX ABI if they relate to a
handle allocated by the SPMC; otherwise it returns an error.
Change-Id: I9801f1fd5fdc84af7317ac7570c428ece5da53f6
Signed-off-by: J-Alves <joao.alves@arm.com>
-rw-r--r-- | inc/hf/arch/plat/ffa.h | 8 | ||||
-rw-r--r-- | inc/hf/ffa_memory.h | 4 | ||||
-rw-r--r-- | inc/hf/ffa_memory_internal.h | 38 | ||||
-rw-r--r-- | src/api.c | 31 | ||||
-rw-r--r-- | src/arch/aarch64/plat/ffa/absent.c | 13 | ||||
-rw-r--r-- | src/arch/aarch64/plat/ffa/hypervisor.c | 250 | ||||
-rw-r--r-- | src/arch/aarch64/plat/ffa/spmc.c | 13 | ||||
-rw-r--r-- | src/arch/fake/hypervisor/ffa.c | 13 | ||||
-rw-r--r-- | src/ffa_memory.c | 259 | ||||
-rw-r--r-- | test/vmapi/common/ffa.c | 2 |
10 files changed, 359 insertions, 272 deletions
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h index d8e68449e..ff28b0705 100644 --- a/inc/hf/arch/plat/ffa.h +++ b/inc/hf/arch/plat/ffa.h @@ -342,3 +342,11 @@ struct ffa_value plat_ffa_other_world_mem_reclaim( struct ffa_value plat_ffa_other_world_mem_retrieve( struct vm_locked to_locked, struct ffa_memory_region *retrieve_request, uint32_t length, struct mpool *page_pool); + +/** + * Handles the continuation of the memory send operation in case the memory + * region descriptor contains multiple segments. + */ +struct ffa_value plat_ffa_other_world_mem_send_continue( + struct vm *from, void *fragment, uint32_t fragment_length, + ffa_memory_handle_t handle, struct mpool *page_pool); diff --git a/inc/hf/ffa_memory.h b/inc/hf/ffa_memory.h index 1fccfbfc5..36a325bec 100644 --- a/inc/hf/ffa_memory.h +++ b/inc/hf/ffa_memory.h @@ -23,10 +23,6 @@ struct ffa_value ffa_memory_send_continue(struct vm_locked from_locked, uint32_t fragment_length, ffa_memory_handle_t handle, struct mpool *page_pool); -struct ffa_value ffa_memory_other_world_send_continue( - struct vm_locked from_locked, struct vm_locked to_locked, - void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle, - struct mpool *page_pool); struct ffa_value ffa_memory_retrieve(struct vm_locked to_locked, struct ffa_memory_region *retrieve_request, uint32_t retrieve_request_length, diff --git a/inc/hf/ffa_memory_internal.h b/inc/hf/ffa_memory_internal.h index be5897d5c..dbf615829 100644 --- a/inc/hf/ffa_memory_internal.h +++ b/inc/hf/ffa_memory_internal.h @@ -105,6 +105,15 @@ bool allocate_share_state(struct share_states_locked share_states, struct ffa_memory_share_state **share_state_ret); struct share_states_locked share_states_lock(void); void share_states_unlock(struct share_states_locked *share_states); +void share_state_free(struct share_states_locked share_states, + struct ffa_memory_share_state *share_state, + struct mpool *page_pool); +uint32_t share_state_next_fragment_offset( + 
struct share_states_locked share_states, + struct ffa_memory_share_state *share_state); +/** Checks whether the given share state has been fully sent. */ +bool share_state_sending_complete(struct share_states_locked share_states, + struct ffa_memory_share_state *share_state); void dump_share_states(void); /** @@ -136,21 +145,38 @@ struct ffa_value ffa_send_check_update( uint32_t share_func, struct ffa_memory_access *receivers, uint32_t receivers_count, struct mpool *page_pool, bool clear, uint32_t *orig_from_mode_ret); -bool ffa_region_group_identity_map( - struct vm_locked vm_locked, +struct ffa_value ffa_memory_send_complete( + struct vm_locked from_locked, struct share_states_locked share_states, + struct ffa_memory_share_state *share_state, struct mpool *page_pool, + uint32_t *orig_from_mode_ret); +struct ffa_value ffa_memory_send_continue_validate( + struct share_states_locked share_states, ffa_memory_handle_t handle, + struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id, + struct mpool *page_pool); +struct ffa_value ffa_send_check_update( + struct vm_locked from_locked, struct ffa_memory_region_constituent **fragments, - const uint32_t *fragment_constituent_counts, uint32_t fragment_count, - uint32_t mode, struct mpool *ppool, bool commit); + uint32_t *fragment_constituent_counts, uint32_t fragment_count, + uint32_t share_func, struct ffa_memory_access *receivers, + uint32_t receivers_count, struct mpool *page_pool, bool clear, + uint32_t *orig_from_mode_ret); struct ffa_value ffa_retrieve_check_transition( struct vm_locked to, uint32_t share_func, struct ffa_memory_region_constituent **fragments, uint32_t *fragment_constituent_counts, uint32_t fragment_count, uint32_t memory_to_attributes, uint32_t *to_mode); -uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region, - ffa_vm_id_t receiver); struct ffa_value ffa_retrieve_check_update( struct vm_locked to_locked, ffa_vm_id_t from_id, struct 
ffa_memory_region_constituent **fragments, uint32_t *fragment_constituent_counts, uint32_t fragment_count, uint32_t memory_to_attributes, uint32_t share_func, bool clear, struct mpool *page_pool); +uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region, + ffa_vm_id_t receiver); +bool ffa_region_group_identity_map( + struct vm_locked vm_locked, + struct ffa_memory_region_constituent **fragments, + const uint32_t *fragment_constituent_counts, uint32_t fragment_count, + uint32_t mode, struct mpool *ppool, bool commit); +bool memory_region_receivers_from_other_world( + struct ffa_memory_region *memory_region); @@ -3152,7 +3152,8 @@ struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle, struct ffa_value ret; /* Sender ID MBZ at virtual instance. */ - if (sender_vm_id != 0) { + if (vm_id_is_current_world(from->id) && sender_vm_id != 0) { + dlog_verbose("Invalid sender."); return ffa_error(FFA_INVALID_PARAMETERS); } @@ -3202,10 +3203,9 @@ struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle, * memory send (i.e. donate, lend or share) request. * * We can tell from the handle whether the memory transaction is for the - * TEE or not. + * other world or not. */ - if ((handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK) == - FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR) { + if (plat_ffa_memory_handle_allocated_by_current_world(handle)) { struct vm_locked from_locked = vm_lock(from); ret = ffa_memory_send_continue(from_locked, fragment_copy, @@ -3217,26 +3217,9 @@ struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle, */ vm_unlock(&from_locked); } else { - struct vm *to = vm_find(HF_TEE_VM_ID); - struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from); - - /* - * The TEE RX buffer state is checked in - * `ffa_memory_other_world_send_continue` rather than here, as - * we need to return `FFA_MEM_FRAG_RX` with the current offset - * rather than FFA_ERROR FFA_BUSY in case it is busy. 
- */ - - ret = ffa_memory_other_world_send_continue( - vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment_copy, - fragment_length, handle, &api_page_pool); - /* - * `ffa_memory_other_world_send_continue` takes ownership of the - * fragment_copy, so we don't need to free it here. - */ - - vm_unlock(&vm_to_from_lock.vm1); - vm_unlock(&vm_to_from_lock.vm2); + ret = plat_ffa_other_world_mem_send_continue( + from, fragment_copy, fragment_length, handle, + &api_page_pool); } return ret; diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c index 144425dea..e08d3322f 100644 --- a/src/arch/aarch64/plat/ffa/absent.c +++ b/src/arch/aarch64/plat/ffa/absent.c @@ -553,3 +553,16 @@ struct ffa_value plat_ffa_other_world_mem_retrieve( return ffa_error(FFA_INVALID_PARAMETERS); } + +struct ffa_value plat_ffa_other_world_mem_send_continue( + struct vm *from, void *fragment, uint32_t fragment_length, + ffa_memory_handle_t handle, struct mpool *page_pool) +{ + (void)from; + (void)fragment; + (void)fragment_length; + (void)handle; + (void)page_pool; + + return ffa_error(FFA_INVALID_PARAMETERS); +} diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c index 2ae461004..3ff5103c0 100644 --- a/src/arch/aarch64/plat/ffa/hypervisor.c +++ b/src/arch/aarch64/plat/ffa/hypervisor.c @@ -1196,8 +1196,7 @@ static struct ffa_value ffa_memory_other_world_send( } else if (ret.func != FFA_MEM_FRAG_RX_32) { dlog_warning( "Got %#x from other world in response to %#x " - "for " - "fragment with %d/%d, expected " + "for fragment with %d/%d, expected " "FFA_MEM_FRAG_RX.\n", ret.func, share_func, fragment_length, memory_share_length); @@ -1721,3 +1720,250 @@ out: /* Return ret as received from the SPMC. */ return ret; } + +/** + * Forwards a memory send continuation message on to the other world. 
+ */ +static struct ffa_value memory_send_continue_other_world_forward( + struct vm_locked other_world_locked, ffa_vm_id_t sender_vm_id, + void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle) +{ + struct ffa_value ret; + + memcpy_s(other_world_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, + fragment, fragment_length); + other_world_locked.vm->mailbox.recv_size = fragment_length; + other_world_locked.vm->mailbox.recv_sender = sender_vm_id; + other_world_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32; + other_world_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED; + ret = arch_other_world_call( + (struct ffa_value){.func = FFA_MEM_FRAG_TX_32, + .arg1 = (uint32_t)handle, + .arg2 = (uint32_t)(handle >> 32), + .arg3 = fragment_length, + .arg4 = (uint64_t)sender_vm_id << 16}); + + /* + * After the call to the other world completes it must have finished + * reading its RX buffer, so it is ready for another message. + */ + other_world_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY; + + return ret; +} + +/** + * Continues an operation to donate, lend or share memory to the other world VM. + * If this is the last fragment then checks that the transition is valid for the + * type of memory sending operation and updates the stage-2 page tables of the + * sender. + * + * Assumes that the caller has already found and locked the sender VM and copied + * the memory region descriptor from the sender's TX buffer to a freshly + * allocated page from Hafnium's internal pool. + * + * This function takes ownership of the `memory_region` passed in and will free + * it when necessary; it must not be freed by the caller. 
+ */ +static struct ffa_value ffa_memory_other_world_send_continue( + struct vm_locked from_locked, struct vm_locked to_locked, + void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle, + struct mpool *page_pool) +{ + struct share_states_locked share_states = share_states_lock(); + struct ffa_memory_share_state *share_state; + struct ffa_value ret; + struct ffa_memory_region *memory_region; + + ret = ffa_memory_send_continue_validate(share_states, handle, + &share_state, + from_locked.vm->id, page_pool); + if (ret.func != FFA_SUCCESS_32) { + goto out_free_fragment; + } + memory_region = share_state->memory_region; + + if (!memory_region_receivers_from_other_world(memory_region)) { + dlog_error( + "Got SPM-allocated handle for memory send to non-other " + "world VM. This should never happen, and indicates a " + "bug.\n"); + ret = ffa_error(FFA_INVALID_PARAMETERS); + goto out_free_fragment; + } + + if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY || + to_locked.vm->mailbox.recv == NULL) { + /* + * If the other_world RX buffer is not available, tell the + * sender to retry by returning the current offset again. + */ + ret = (struct ffa_value){ + .func = FFA_MEM_FRAG_RX_32, + .arg1 = (uint32_t)handle, + .arg2 = (uint32_t)(handle >> 32), + .arg3 = share_state_next_fragment_offset(share_states, + share_state), + }; + goto out_free_fragment; + } + + /* Add this fragment. */ + share_state->fragments[share_state->fragment_count] = fragment; + share_state->fragment_constituent_counts[share_state->fragment_count] = + fragment_length / sizeof(struct ffa_memory_region_constituent); + share_state->fragment_count++; + + /* Check whether the memory send operation is now ready to complete. */ + if (share_state_sending_complete(share_states, share_state)) { + struct mpool local_page_pool; + uint32_t orig_from_mode; + + /* + * Use a local page pool so that we can roll back if necessary. 
+ */ + mpool_init_with_fallback(&local_page_pool, page_pool); + + ret = ffa_memory_send_complete(from_locked, share_states, + share_state, &local_page_pool, + &orig_from_mode); + + if (ret.func == FFA_SUCCESS_32) { + /* + * Forward final fragment on to the other_world so that + * it can complete the memory sending operation. + */ + ret = memory_send_continue_other_world_forward( + to_locked, from_locked.vm->id, fragment, + fragment_length, handle); + + if (ret.func != FFA_SUCCESS_32) { + /* + * The error will be passed on to the caller, + * but log it here too. + */ + dlog_verbose( + "other_world didn't successfully " + "complete " + "memory send operation; returned %#x " + "(%d). Rolling back.\n", + ret.func, ret.arg2); + + /* + * The other_world failed to complete the send + * operation, so roll back the page table update + * for the VM. This can't fail because it won't + * try to allocate more memory than was freed + * into the `local_page_pool` by + * `ffa_send_check_update` in the initial + * update. + */ + CHECK(ffa_region_group_identity_map( + from_locked, share_state->fragments, + share_state + ->fragment_constituent_counts, + share_state->fragment_count, + orig_from_mode, &local_page_pool, + true)); + } + + /* Free share state. */ + share_state_free(share_states, share_state, page_pool); + } else { + /* Abort sending to other_world. */ + struct ffa_value other_world_ret = + arch_other_world_call((struct ffa_value){ + .func = FFA_MEM_RECLAIM_32, + .arg1 = (uint32_t)handle, + .arg2 = (uint32_t)(handle >> 32)}); + + if (other_world_ret.func != FFA_SUCCESS_32) { + /* + * Nothing we can do if other_world doesn't + * abort properly, just log it. + */ + dlog_verbose( + "other_world didn't successfully abort " + "failed memory send operation; " + "returned %#x %d).\n", + other_world_ret.func, + other_world_ret.arg2); + } + /* + * We don't need to free the share state in this case + * because ffa_memory_send_complete does that already. 
+ */ + } + + mpool_fini(&local_page_pool); + } else { + uint32_t next_fragment_offset = + share_state_next_fragment_offset(share_states, + share_state); + + ret = memory_send_continue_other_world_forward( + to_locked, from_locked.vm->id, fragment, + fragment_length, handle); + + if (ret.func != FFA_MEM_FRAG_RX_32 || + ffa_frag_handle(ret) != handle || + ret.arg3 != next_fragment_offset || + ffa_frag_sender(ret) != from_locked.vm->id) { + dlog_verbose( + "Got unexpected result from forwarding " + "FFA_MEM_FRAG_TX to other_world: %#x (handle " + "%#x, offset %d, sender %d); expected " + "FFA_MEM_FRAG_RX (handle %#x, offset %d, " + "sender %d).\n", + ret.func, ffa_frag_handle(ret), ret.arg3, + ffa_frag_sender(ret), handle, + next_fragment_offset, from_locked.vm->id); + /* Free share state. */ + share_state_free(share_states, share_state, page_pool); + ret = ffa_error(FFA_INVALID_PARAMETERS); + goto out; + } + + ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32, + .arg1 = (uint32_t)handle, + .arg2 = (uint32_t)(handle >> 32), + .arg3 = next_fragment_offset}; + } + goto out; + +out_free_fragment: + mpool_free(page_pool, fragment); + +out: + share_states_unlock(&share_states); + return ret; +} + +struct ffa_value plat_ffa_other_world_mem_send_continue( + struct vm *from, void *fragment, uint32_t fragment_length, + ffa_memory_handle_t handle, struct mpool *page_pool) +{ + struct ffa_value ret; + struct vm *to = vm_find(HF_TEE_VM_ID); + struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from); + + /* + * The TEE RX buffer state is checked in + * `ffa_memory_other_world_send_continue` rather than here, as + * we need to return `FFA_MEM_FRAG_RX` with the current offset + * rather than FFA_ERROR FFA_BUSY in case it is busy. 
+ */ + + ret = ffa_memory_other_world_send_continue( + vm_to_from_lock.vm2, vm_to_from_lock.vm1, fragment, + fragment_length, handle, page_pool); + /* + * `ffa_memory_other_world_send_continue` takes ownership of the + * fragment_copy, so we don't need to free it here. + */ + + vm_unlock(&vm_to_from_lock.vm1); + vm_unlock(&vm_to_from_lock.vm2); + + return ret; +} diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c index 15b9cf43e..150f5acda 100644 --- a/src/arch/aarch64/plat/ffa/spmc.c +++ b/src/arch/aarch64/plat/ffa/spmc.c @@ -2584,3 +2584,16 @@ struct ffa_value plat_ffa_other_world_mem_retrieve( return ffa_error(FFA_INVALID_PARAMETERS); } + +struct ffa_value plat_ffa_other_world_mem_send_continue( + struct vm *from, void *fragment, uint32_t fragment_length, + ffa_memory_handle_t handle, struct mpool *page_pool) +{ + (void)from; + (void)fragment; + (void)fragment_length; + (void)handle; + (void)page_pool; + + return ffa_error(FFA_INVALID_PARAMETERS); +} diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c index e6045bef1..ffa87cdab 100644 --- a/src/arch/fake/hypervisor/ffa.c +++ b/src/arch/fake/hypervisor/ffa.c @@ -528,3 +528,16 @@ struct ffa_value plat_ffa_other_world_mem_retrieve( return ffa_error(FFA_INVALID_PARAMETERS); } + +struct ffa_value plat_ffa_other_world_mem_send_continue( + struct vm *from, void *fragment, uint32_t fragment_length, + ffa_memory_handle_t handle, struct mpool *page_pool) +{ + (void)from; + (void)fragment; + (void)fragment_length; + (void)handle; + (void)page_pool; + + return ffa_error(FFA_INVALID_PARAMETERS); +} diff --git a/src/ffa_memory.c b/src/ffa_memory.c index 4e0f68416..c95379cb3 100644 --- a/src/ffa_memory.c +++ b/src/ffa_memory.c @@ -116,9 +116,9 @@ void share_states_unlock(struct share_states_locked *share_states) * initialises `share_state_ret` to point to the share state and returns true. * Otherwise returns false. 
*/ -static bool get_share_state(struct share_states_locked share_states, - ffa_memory_handle_t handle, - struct ffa_memory_share_state **share_state_ret) +bool get_share_state(struct share_states_locked share_states, + ffa_memory_handle_t handle, + struct ffa_memory_share_state **share_state_ret) { struct ffa_memory_share_state *share_state; uint64_t index; @@ -156,9 +156,9 @@ static bool get_share_state(struct share_states_locked share_states, } /** Marks a share state as unallocated. */ -static void share_state_free(struct share_states_locked share_states, - struct ffa_memory_share_state *share_state, - struct mpool *page_pool) +void share_state_free(struct share_states_locked share_states, + struct ffa_memory_share_state *share_state, + struct mpool *page_pool) { uint32_t i; @@ -183,9 +183,8 @@ static void share_state_free(struct share_states_locked share_states, } /** Checks whether the given share state has been fully sent. */ -static bool share_state_sending_complete( - struct share_states_locked share_states, - struct ffa_memory_share_state *share_state) +bool share_state_sending_complete(struct share_states_locked share_states, + struct ffa_memory_share_state *share_state) { struct ffa_composite_memory_region *composite; uint32_t expected_constituent_count; @@ -222,7 +221,7 @@ static bool share_state_sending_complete( * Calculates the offset of the next fragment expected for the given share * state. */ -static uint32_t share_state_next_fragment_offset( +uint32_t share_state_next_fragment_offset( struct share_states_locked share_states, struct ffa_memory_share_state *share_state) { @@ -1100,7 +1099,7 @@ out: * * Returns FFA_SUCCESS with the handle encoded, or the relevant FFA_ERROR. 
*/ -static struct ffa_value ffa_memory_send_complete( +struct ffa_value ffa_memory_send_complete( struct vm_locked from_locked, struct share_states_locked share_states, struct ffa_memory_share_state *share_state, struct mpool *page_pool, uint32_t *orig_from_mode_ret) @@ -1396,7 +1395,7 @@ struct ffa_value ffa_memory_send_validate( * Returns FFA_SUCCESS if the request was valid, or the relevant FFA_ERROR if * not. */ -static struct ffa_value ffa_memory_send_continue_validate( +struct ffa_value ffa_memory_send_continue_validate( struct share_states_locked share_states, ffa_memory_handle_t handle, struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id, struct mpool *page_pool) @@ -1418,7 +1417,8 @@ static struct ffa_value ffa_memory_send_continue_validate( } memory_region = share_state->memory_region; - if (memory_region->sender != from_vm_id) { + if (vm_id_is_current_world(from_vm_id) && + memory_region->sender != from_vm_id) { dlog_verbose("Invalid sender %d.\n", memory_region->sender); return ffa_error(FFA_INVALID_PARAMETERS); } @@ -1450,39 +1450,9 @@ static struct ffa_value ffa_memory_send_continue_validate( } /** - * Forwards a memory send continuation message on to the other world. 
- */ -static struct ffa_value memory_send_continue_other_world_forward( - struct vm_locked other_world_locked, ffa_vm_id_t sender_vm_id, - void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle) -{ - struct ffa_value ret; - - memcpy_s(other_world_locked.vm->mailbox.recv, FFA_MSG_PAYLOAD_MAX, - fragment, fragment_length); - other_world_locked.vm->mailbox.recv_size = fragment_length; - other_world_locked.vm->mailbox.recv_sender = sender_vm_id; - other_world_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32; - other_world_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED; - ret = arch_other_world_call( - (struct ffa_value){.func = FFA_MEM_FRAG_TX_32, - .arg1 = (uint32_t)handle, - .arg2 = (uint32_t)(handle >> 32), - .arg3 = fragment_length, - .arg4 = (uint64_t)sender_vm_id << 16}); - /* - * After the call to the other world completes it must have finished - * reading its RX buffer, so it is ready for another message. - */ - other_world_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY; - - return ret; -} - -/** * Checks if there is at least one receiver from the other world. */ -static bool memory_region_receivers_from_other_world( +bool memory_region_receivers_from_other_world( struct ffa_memory_region *memory_region) { for (uint32_t i = 0; i < memory_region->receiver_count; i++) { @@ -1569,11 +1539,21 @@ struct ffa_value ffa_memory_send(struct vm_locked from_locked, from_locked, share_states, share_state, page_pool, &(share_state->sender_orig_mode)); } else { + /* + * Use sender ID from 'memory_region' assuming + * that at this point it has been validated: + * - MBZ at virtual FF-A instance. + */ + ffa_vm_id_t sender_to_ret = + (from_locked.vm->id == HF_OTHER_WORLD_ID) + ? 
memory_region->sender + : 0; ret = (struct ffa_value){ .func = FFA_MEM_FRAG_RX_32, .arg1 = (uint32_t)memory_region->handle, .arg2 = (uint32_t)(memory_region->handle >> 32), - .arg3 = fragment_length}; + .arg3 = fragment_length, + .arg4 = (uint32_t)(sender_to_ret & 0xffff) << 16}; } out: @@ -1653,195 +1633,6 @@ out: return ret; } -/** - * Continues an operation to donate, lend or share memory to the other world VM. - * If this is the last fragment then checks that the transition is valid for the - * type of memory sending operation and updates the stage-2 page tables of the - * sender. - * - * Assumes that the caller has already found and locked the sender VM and copied - * the memory region descriptor from the sender's TX buffer to a freshly - * allocated page from Hafnium's internal pool. - * - * This function takes ownership of the `memory_region` passed in and will free - * it when necessary; it must not be freed by the caller. - */ -struct ffa_value ffa_memory_other_world_send_continue( - struct vm_locked from_locked, struct vm_locked to_locked, - void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle, - struct mpool *page_pool) -{ - struct share_states_locked share_states = share_states_lock(); - struct ffa_memory_share_state *share_state; - struct ffa_value ret; - struct ffa_memory_region *memory_region; - - ret = ffa_memory_send_continue_validate(share_states, handle, - &share_state, - from_locked.vm->id, page_pool); - if (ret.func != FFA_SUCCESS_32) { - goto out_free_fragment; - } - memory_region = share_state->memory_region; - - if (!memory_region_receivers_from_other_world(memory_region)) { - dlog_error( - "Got SPM-allocated handle for memory send to non-other " - "world VM. 
This should never happen, and indicates a " - "bug.\n"); - ret = ffa_error(FFA_INVALID_PARAMETERS); - goto out_free_fragment; - } - - if (to_locked.vm->mailbox.state != MAILBOX_STATE_EMPTY || - to_locked.vm->mailbox.recv == NULL) { - /* - * If the other world RX buffer is not available, tell the - * sender to retry by returning the current offset again. - */ - ret = (struct ffa_value){ - .func = FFA_MEM_FRAG_RX_32, - .arg1 = (uint32_t)handle, - .arg2 = (uint32_t)(handle >> 32), - .arg3 = share_state_next_fragment_offset(share_states, - share_state), - }; - goto out_free_fragment; - } - - /* Add this fragment. */ - share_state->fragments[share_state->fragment_count] = fragment; - share_state->fragment_constituent_counts[share_state->fragment_count] = - fragment_length / sizeof(struct ffa_memory_region_constituent); - share_state->fragment_count++; - - /* Check whether the memory send operation is now ready to complete. */ - if (share_state_sending_complete(share_states, share_state)) { - struct mpool local_page_pool; - uint32_t orig_from_mode; - - /* - * Use a local page pool so that we can roll back if necessary. - */ - mpool_init_with_fallback(&local_page_pool, page_pool); - - ret = ffa_memory_send_complete(from_locked, share_states, - share_state, &local_page_pool, - &orig_from_mode); - - if (ret.func == FFA_SUCCESS_32) { - /* - * Forward final fragment on to the other world so that - * it can complete the memory sending operation. - */ - ret = memory_send_continue_other_world_forward( - to_locked, from_locked.vm->id, fragment, - fragment_length, handle); - - if (ret.func != FFA_SUCCESS_32) { - /* - * The error will be passed on to the caller, - * but log it here too. - */ - dlog_verbose( - "other world didn't successfully " - "complete " - "memory send operation; returned %#x " - "(%d). Rolling back.\n", - ret.func, ret.arg2); - - /* - * The other world failed to complete the send - * operation, so roll back the page table update - * for the VM. 
This can't fail because it won't - * try to allocate more memory than was freed - * into the `local_page_pool` by - * `ffa_send_check_update` in the initial - * update. - */ - CHECK(ffa_region_group_identity_map( - from_locked, share_state->fragments, - share_state - ->fragment_constituent_counts, - share_state->fragment_count, - orig_from_mode, &local_page_pool, - true)); - } - - /* Free share state. */ - share_state_free(share_states, share_state, page_pool); - } else { - /* Abort sending to other world. */ - struct ffa_value other_world_ret = - arch_other_world_call((struct ffa_value){ - .func = FFA_MEM_RECLAIM_32, - .arg1 = (uint32_t)handle, - .arg2 = (uint32_t)(handle >> 32)}); - - if (other_world_ret.func != FFA_SUCCESS_32) { - /* - * Nothing we can do if other world doesn't - * abort properly, just log it. - */ - dlog_verbose( - "other world didn't successfully abort " - "failed " - "memory send operation; returned %#x " - "(%d).\n", - other_world_ret.func, - other_world_ret.arg2); - } - /* - * We don't need to free the share state in this case - * because ffa_memory_send_complete does that already. - */ - } - - mpool_fini(&local_page_pool); - } else { - uint32_t next_fragment_offset = - share_state_next_fragment_offset(share_states, - share_state); - - ret = memory_send_continue_other_world_forward( - to_locked, from_locked.vm->id, fragment, - fragment_length, handle); - - if (ret.func != FFA_MEM_FRAG_RX_32 || - ffa_frag_handle(ret) != handle || - ret.arg3 != next_fragment_offset || - ffa_frag_sender(ret) != from_locked.vm->id) { - dlog_verbose( - "Got unexpected result from forwarding " - "FFA_MEM_FRAG_TX to other world. %#x (handle " - "%#x, " - "offset %d, sender %d); expected " - "FFA_MEM_FRAG_RX (handle %#x, offset %d, " - "sender %d).\n", - ret.func, ffa_frag_handle(ret), ret.arg3, - ffa_frag_sender(ret), handle, - next_fragment_offset, from_locked.vm->id); - /* Free share state. 
*/ - share_state_free(share_states, share_state, page_pool); - ret = ffa_error(FFA_INVALID_PARAMETERS); - goto out; - } - - ret = (struct ffa_value){.func = FFA_MEM_FRAG_RX_32, - .arg1 = (uint32_t)handle, - .arg2 = (uint32_t)(handle >> 32), - .arg3 = next_fragment_offset}; - } - goto out; - -out_free_fragment: - mpool_free(page_pool, fragment); - -out: - share_states_unlock(&share_states); - return ret; -} - /** Clean up after the receiver has finished retrieving a memory region. */ static void ffa_memory_retrieve_complete( struct share_states_locked share_states, diff --git a/test/vmapi/common/ffa.c b/test/vmapi/common/ffa.c index 91cf9e485..7a93c8630 100644 --- a/test/vmapi/common/ffa.c +++ b/test/vmapi/common/ffa.c @@ -61,8 +61,6 @@ static void send_fragmented_memory_region( EXPECT_EQ(ffa_frag_handle(*send_ret), fragment_handle); } EXPECT_EQ(send_ret->arg3, sent_length); - /* Sender MBZ at virtual instance. */ - EXPECT_EQ(ffa_frag_sender(*send_ret), 0); remaining_constituent_count = ffa_memory_fragment_init( tx_buffer, HF_MAILBOX_SIZE, |