Merge "fix(realm): fix host_realm_init_ipa_state()'s retry path"
diff --git a/include/runtime_services/ffa_helpers.h b/include/runtime_services/ffa_helpers.h
index 917885f..4dc3f53 100644
--- a/include/runtime_services/ffa_helpers.h
+++ b/include/runtime_services/ffa_helpers.h
@@ -867,6 +867,21 @@
void ffa_hypervisor_retrieve_request_init(struct ffa_memory_region *region,
ffa_memory_handle_t handle);
+static inline uint32_t ffa_mem_retrieve_res_total_size(struct ffa_value ret)
+{
+ return ret.arg1;
+}
+
+static inline uint32_t ffa_mem_retrieve_res_frag_size(struct ffa_value ret)
+{
+ return ret.arg2;
+}
+
+static inline uint32_t ffa_mem_frag_tx_frag_size(struct ffa_value ret)
+{
+ return ret.arg3;
+}
+
uint32_t ffa_memory_region_init(
struct ffa_memory_region *memory_region, size_t memory_region_max_size,
ffa_id_t sender, struct ffa_memory_access receivers[],
diff --git a/include/runtime_services/spm_common.h b/include/runtime_services/spm_common.h
index 3fe154a..c794cb0 100644
--- a/include/runtime_services/spm_common.h
+++ b/include/runtime_services/spm_common.h
@@ -116,6 +116,10 @@
uint32_t receiver_count, ffa_memory_region_flags_t flags,
bool is_normal_memory);
+bool hypervisor_retrieve_request_continue(
+ struct mailbox_buffers *mb, uint64_t handle, void *out, uint32_t out_size,
+ uint32_t total_size, uint32_t fragment_offset, bool release_rx);
+
bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
void *out, uint32_t out_size);
diff --git a/tftf/tests/runtime_services/secure_service/spm_common.c b/tftf/tests/runtime_services/secure_service/spm_common.c
index 4fac119..753d491 100644
--- a/tftf/tests/runtime_services/secure_service/spm_common.c
+++ b/tftf/tests/runtime_services/secure_service/spm_common.c
@@ -333,6 +333,99 @@
return true;
}
+/**
+ * Looping part of the fragmented retrieve request.
+ */
+bool hypervisor_retrieve_request_continue(
+ struct mailbox_buffers *mb, uint64_t handle, void *out, uint32_t out_size,
+ uint32_t total_size, uint32_t fragment_offset, bool release_rx)
+{
+ struct ffa_value ret;
+ uint32_t fragment_size;
+
+ if (mb == NULL) {
+ ERROR("Invalid parameters, please provide valid mailbox.\n");
+ return false;
+ }
+
+ while (fragment_offset < total_size) {
+ VERBOSE("Calling again. frag offset: %d; total: %d\n",
+ fragment_offset, total_size);
+
+ /* The first time it is called is controlled through arguments. */
+ if (release_rx) {
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+ } else {
+ release_rx = true;
+ }
+
+ ret = ffa_mem_frag_rx(handle, fragment_offset);
+ if (ret.fid != FFA_MEM_FRAG_TX) {
+ ERROR("ffa_mem_frag_rx() failed: %d\n",
+ ffa_error_code(ret));
+ return false;
+ }
+
+ if (ffa_frag_handle(ret) != handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, got "
+ "%llu\n",
+ __func__, handle, ffa_frag_handle(ret));
+ return false;
+ }
+
+ /* Sender MBZ at physical instance. */
+ if (ffa_frag_sender(ret) != 0) {
+ ERROR("%s: fragment sender mismatch: expected %d, got "
+ "%d\n",
+ __func__, 0, ffa_frag_sender(ret));
+ return false;
+ }
+
+ fragment_size = ffa_mem_frag_tx_frag_size(ret);
+
+ if (fragment_size == 0) {
+ ERROR("%s: fragment size must not be 0\n", __func__);
+ return false;
+ }
+
+ if (out != NULL) {
+ if (fragment_offset + fragment_size > out_size) {
+ ERROR("%s: fragment is too big to fit in out buffer "
+ "(%d > %d)\n",
+ __func__, fragment_offset + fragment_size,
+ out_size);
+ return false;
+ }
+
+ VERBOSE("Copying fragment at offset %d with size %d\n",
+ fragment_offset, fragment_size);
+ memcpy((uint8_t *)out + fragment_offset, mb->recv,
+ fragment_size);
+ }
+
+ fragment_offset += fragment_size;
+ }
+
+ if (fragment_offset != total_size) {
+ ERROR("%s: fragment size mismatch: expected %d, got %d\n",
+ __func__, total_size, fragment_offset);
+ return false;
+ }
+
+ ret = ffa_rx_release();
+ if (ret.fid != FFA_SUCCESS_SMC32) {
+ ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
+ return false;
+ }
+
+ return true;
+}
+
bool hypervisor_retrieve_request(struct mailbox_buffers *mb, uint64_t handle,
void *out, uint32_t out_size)
{
@@ -342,8 +435,8 @@
uint32_t fragment_offset;
struct ffa_memory_region *region_out = out;
- if (out == NULL || mb == NULL) {
- ERROR("Invalid parameters!\n");
+ if (mb == NULL) {
+ ERROR("Invalid parameters, please provide valid mailbox.\n");
return false;
}
@@ -363,112 +456,49 @@
* fragments is equal to total_size, the memory transaction has been
* completed.
*/
- total_size = ret.arg1;
- fragment_size = ret.arg2;
+ total_size = ffa_mem_retrieve_res_total_size(ret);
+ fragment_size = ffa_mem_retrieve_res_frag_size(ret);
+
fragment_offset = fragment_size;
VERBOSE("total_size=%d, fragment_size=%d, fragment_offset=%d\n",
total_size, fragment_size, fragment_offset);
- if (fragment_size > PAGE_SIZE) {
- ERROR("Fragment should be smaller than RX buffer!\n");
- return false;
- }
- if (total_size > out_size) {
- ERROR("output buffer is not large enough to store all "
- "fragments (total_size=%d, max_size=%d)\n",
- total_size, out_size);
- return false;
- }
-
- /*
- * Copy the received message to the out buffer. This is necessary
- * because `mb->recv` will be overwritten if sending a fragmented
- * message.
- */
- memcpy(out, mb->recv, fragment_size);
-
- if (region_out->receiver_count == 0) {
- VERBOSE("copied region has no recivers\n");
- return false;
- }
-
- if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
- VERBOSE("SPMC memory sharing operations support max of %u "
- "receivers!\n",
- MAX_MEM_SHARE_RECIPIENTS);
- return false;
- }
-
- while (fragment_offset < total_size) {
- VERBOSE("Calling again. frag offset: %d; total: %d\n",
- fragment_offset, total_size);
- ret = ffa_rx_release();
- if (ret.fid != FFA_SUCCESS_SMC32) {
- ERROR("ffa_rx_release() failed: %d\n",
- ffa_error_code(ret));
+ if (out != NULL) {
+ if (fragment_size > PAGE_SIZE) {
+ ERROR("Fragment should be smaller than RX buffer!\n");
+ return false;
+ }
+ if (total_size > out_size) {
+ ERROR("Output buffer is not large enough to store all "
+ "fragments (total_size=%d, max_size=%d)\n",
+ total_size, out_size);
return false;
}
- ret = ffa_mem_frag_rx(handle, fragment_offset);
- if (ret.fid != FFA_MEM_FRAG_TX) {
- ERROR("ffa_mem_frag_rx() failed: %d\n",
- ffa_error_code(ret));
+ /*
+ * Copy the received message to the out buffer. This is necessary
+ * because `mb->recv` will be overwritten if sending a fragmented
+ * message.
+ */
+ memcpy(out, mb->recv, fragment_size);
+
+ if (region_out->receiver_count == 0) {
+			VERBOSE("Copied region has no receivers\n");
return false;
}
- if (ffa_frag_handle(ret) != handle) {
- ERROR("%s: fragment handle mismatch: expected %llu, "
- "got "
- "%llu\n",
- __func__, handle, ffa_frag_handle(ret));
+ if (region_out->receiver_count > MAX_MEM_SHARE_RECIPIENTS) {
+ VERBOSE("SPMC memory sharing operations support max of %u "
+ "receivers!\n",
+ MAX_MEM_SHARE_RECIPIENTS);
return false;
}
-
- /* Sender MBZ at physical instance. */
- if (ffa_frag_sender(ret) != 0) {
- ERROR("%s: fragment sender mismatch: expected %d, got "
- "%d\n",
- __func__, 0, ffa_frag_sender(ret));
- return false;
- }
-
- fragment_size = ret.arg2;
- if (fragment_size == 0) {
- ERROR("%s: fragment size must not be 0\n", __func__);
- return false;
- }
-
- if (fragment_offset + fragment_size > out_size) {
- ERROR("%s: fragment is too big to fit in out buffer "
- "(%d > %d)\n",
- __func__, fragment_offset + fragment_size,
- out_size);
- return false;
- }
-
- VERBOSE("copying fragment at offset %d with size %d\n",
- fragment_offset, fragment_size);
- memcpy((uint8_t *)out + fragment_offset, mb->recv,
- fragment_size);
-
- fragment_offset += fragment_size;
+ } else {
+ VERBOSE("%s: No output buffer provided...\n", __func__);
}
- if (fragment_offset != total_size) {
- ERROR("%s: fragment size mismatch: expected %d, got %d\n",
- __func__, total_size, fragment_offset);
- return false;
- }
-
- ret = ffa_rx_release();
- if (ret.fid != FFA_SUCCESS_SMC32) {
- ERROR("ffa_rx_release() failed: %d\n", ffa_error_code(ret));
- return false;
- }
-
- VERBOSE("Memory Retrieved!\n");
-
- return true;
+ return hypervisor_retrieve_request_continue(
+ mb, handle, out, out_size, total_size, fragment_offset, false);
}
bool memory_relinquish(struct ffa_mem_relinquish *m, uint64_t handle,
@@ -493,9 +523,8 @@
const struct ffa_memory_region_constituent constituents[],
uint32_t constituent_count, uint32_t remaining_constituent_count,
uint32_t sent_length, uint32_t total_length, bool allocator_is_spmc,
- struct ffa_value ret)
+ struct ffa_value *ret)
{
-
uint64_t handle;
uint64_t handle_mask;
uint64_t expected_handle_mask =
@@ -508,25 +537,24 @@
while (remaining_constituent_count != 0) {
VERBOSE("%s: %d constituents left to send.\n", __func__,
remaining_constituent_count);
- if (ret.fid != FFA_MEM_FRAG_RX) {
+ if (ret->fid != FFA_MEM_FRAG_RX) {
ERROR("ffa_mem_frax_tx() failed: %d\n",
- ffa_error_code(ret));
+ ffa_error_code(*ret));
return false;
}
if (fragment_handle == FFA_MEMORY_HANDLE_INVALID) {
- fragment_handle = ffa_frag_handle(ret);
- } else if (ffa_frag_handle(ret) != fragment_handle) {
- ERROR("%s: fragment handle mismatch: expected %llu, "
- "got %llu\n",
- __func__, fragment_handle, ffa_frag_handle(ret));
+ fragment_handle = ffa_frag_handle(*ret);
+ } else if (ffa_frag_handle(*ret) != fragment_handle) {
+ ERROR("%s: fragment handle mismatch: expected %llu, got %llu\n",
+ __func__, fragment_handle, ffa_frag_handle(*ret));
return false;
}
- if (ret.arg3 != sent_length) {
+ if (ret->arg3 != sent_length) {
ERROR("%s: fragment length mismatch: expected %u, got "
"%lu\n",
- __func__, sent_length, ret.arg3);
+ __func__, sent_length, ret->arg3);
return false;
}
@@ -536,7 +564,7 @@
remaining_constituent_count,
remaining_constituent_count, &fragment_length);
- ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
+ *ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
sent_length += fragment_length;
}
@@ -546,13 +574,13 @@
return false;
}
- if (ret.fid != FFA_SUCCESS_SMC32) {
+ if (ret->fid != FFA_SUCCESS_SMC32) {
ERROR("%s: ffa_mem_frax_tx() failed: %d\n", __func__,
- ffa_error_code(ret));
+ ffa_error_code(*ret));
return false;
}
- handle = ffa_mem_success_handle(ret);
+ handle = ffa_mem_success_handle(*ret);
handle_mask = (handle >> FFA_MEMORY_HANDLE_ALLOCATOR_SHIFT) &
FFA_MEMORY_HANDLE_ALLOCATOR_MASK;
@@ -612,7 +640,7 @@
if (!send_fragmented_memory_region(
send_buffer, constituents, constituent_count,
remaining_constituent_count, fragment_length, total_length,
- true, *ret)) {
+ true, ret)) {
return FFA_MEMORY_HANDLE_INVALID;
}
@@ -968,7 +996,6 @@
return false;
}
-
if (sender != NULL) {
*sender = source_vm_id;
}
diff --git a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
index ad4040a..b402c58 100644
--- a/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
+++ b/tftf/tests/runtime_services/secure_service/test_ffa_memory_sharing.c
@@ -1580,6 +1580,8 @@
return TEST_RESULT_FAIL;
}
+ ffa_rx_release();
+
return TEST_RESULT_SUCCESS;
}
@@ -1606,3 +1608,308 @@
{
return base_ffa_memory_retrieve_request_fail_buffer_realm(true, true);
}
+
+/**
+ * Do a memory sharing operation over two fragments.
+ * Before the 2nd fragment the TX buffer is set in the realm PAS.
+ * The SPMC should fault, recover from it and return ffa_error(FFA_ERROR_ABORTED).
+ */
+test_result_t test_ffa_memory_share_fragmented_tx_realm(void)
+{
+ struct mailbox_buffers mb;
+ uint32_t remaining_constituent_count = 0;
+ uint32_t total_length;
+ uint32_t fragment_length;
+ struct ffa_memory_access receiver = ffa_memory_access_init_permissions_from_mem_func(
+ SP_ID(1), FFA_MEM_SHARE_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+ struct ffa_value ffa_ret;
+ u_register_t ret_rmm;
+ test_result_t ret;
+ uint64_t handle;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ register_custom_sync_exception_handler(data_abort_handler);
+
+ /* Only send one constituent to start with. */
+ remaining_constituent_count = ffa_memory_region_init(
+ (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+ &receiver, 1, constituents, ARRAY_SIZE(constituents), 0,
+ 0, FFA_MEMORY_NOT_SPECIFIED_MEM,
+ FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_INNER_SHAREABLE,
+ &total_length, &fragment_length);
+
+ /* It should have copied them all. */
+ if (remaining_constituent_count > 0) {
+ ERROR("Transaction descriptor initialization failed!\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /*
+ * Take the size of a constituent from the fragment to force the
+ * operation to be fragmented.
+ */
+ fragment_length -= sizeof(struct ffa_memory_region_constituent);
+
+ ffa_ret = ffa_mem_share(total_length, fragment_length);
+
+ if (!is_expected_ffa_return(ffa_ret, FFA_MEM_FRAG_RX)) {
+ ERROR("Expected %s after the memory share.\n",
+ ffa_func_name(FFA_MEM_FRAG_RX));
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ handle = ffa_frag_handle(ffa_ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("SPMC returned an invalid handle for the operation.\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Prepare the next fragment for the operation. */
+ remaining_constituent_count = ffa_memory_fragment_init(
+ mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_length);
+
+ /*
+ * Delegate send/tx buffer to a realm. This should make memory sharing operation
+ * fail.
+ */
+ ret_rmm = host_rmi_granule_delegate((u_register_t)mb.send);
+
+ if (ret_rmm != 0UL) {
+ INFO("Delegate operation returns 0x%lx for address %p\n",
+ ret_rmm, mb.send);
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ ffa_ret = ffa_mem_frag_tx(handle, fragment_length);
+
+ if (!is_expected_ffa_error(ffa_ret, FFA_ERROR_ABORTED)) {
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Undelegate to reestablish the same security state for PAS. */
+ ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.send);
+ if (ret_rmm != 0UL) {
+ ERROR("Undelegate operation returns 0x%lx for address %llx\n",
+ ret_rmm, (uint64_t)mb.send);
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* This time test should pass. */
+ ffa_ret = ffa_mem_frag_tx(handle, fragment_length);
+
+ if (is_ffa_call_error(ffa_ret)) {
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Reclaim memory to be able to reuse it. */
+ ffa_ret = ffa_mem_reclaim(handle, 0);
+
+ if (is_ffa_call_error(ffa_ret)) {
+ ERROR("Failed to reclaim memory to be used in next test\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ ret = TEST_RESULT_SUCCESS;
+
+exit:
+ unregister_custom_sync_exception_handler();
+
+ return ret;
+}
+
+/**
+ * Do a memory sharing operation over two fragments.
+ * Before the 2nd fragment the RX buffer is set in the realm PAS.
+ * The SPMC should fault, recover from it and return
+ * ffa_error(FFA_ERROR_ABORTED).
+ *
+ * Test Sequence:
+ * - Share memory with SP(1), using a force fragmented approach.
+ * - Initiate a hypervisor retrieve request, and retrieve only
+ * the first fragment.
+ * - Change the physical address space of NWd RX buffer.
+ * - Invoke the FFA_MEM_FRAG_RX interface, which should abort because
+ * of previous step.
+ * - Reestablish the PAS of the NWd RX buffer.
+ * - Continue the hypervisor retrieve request, and obtain the 2nd
+ * fragment.
+ * - Reclaim memory for clean-up of SPMC state.
+ */
+test_result_t test_ffa_memory_share_fragmented_rx_realm(void)
+{
+ struct mailbox_buffers mb;
+ uint32_t remaining_constituent_count = 0;
+ uint32_t total_size;
+ uint32_t fragment_size;
+ uint32_t fragment_offset;
+ struct ffa_memory_access receiver = ffa_memory_access_init_permissions_from_mem_func(
+ SP_ID(1), FFA_MEM_SHARE_SMC32);
+ struct ffa_memory_region_constituent constituents[] = {
+ {(void *)four_share_pages, 4, 0},
+ {(void *)share_page, 1, 0}
+ };
+ struct ffa_value ffa_ret;
+ u_register_t ret_rmm;
+ test_result_t ret;
+ uint64_t handle;
+
+ if (get_armv9_2_feat_rme_support() == 0U) {
+ return TEST_RESULT_SKIPPED;
+ }
+
+ CHECK_SPMC_TESTING_SETUP(1, 2, expected_sp_uuids);
+
+ GET_TFTF_MAILBOX(mb);
+
+ register_custom_sync_exception_handler(data_abort_handler);
+
+ /* Only send one constituent to start with. */
+ remaining_constituent_count = ffa_memory_region_init(
+ (struct ffa_memory_region *)mb.send, MAILBOX_SIZE, SENDER,
+ &receiver, 1, constituents, ARRAY_SIZE(constituents), 0,
+ 0, FFA_MEMORY_NOT_SPECIFIED_MEM,
+ FFA_MEMORY_CACHE_WRITE_BACK,
+ FFA_MEMORY_INNER_SHAREABLE,
+ &total_size, &fragment_size);
+
+ /* It should have copied them all. */
+ if (remaining_constituent_count > 0) {
+ ERROR("Transaction descriptor initialization failed!\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /*
+ * Take the size of a constituent from the fragment to force the
+ * operation to be fragmented.
+ */
+ fragment_size -= sizeof(struct ffa_memory_region_constituent);
+
+ ffa_ret = ffa_mem_share(total_size, fragment_size);
+
+ if (!is_expected_ffa_return(ffa_ret, FFA_MEM_FRAG_RX)) {
+ ERROR("Expected %s after the memory share.\n",
+ ffa_func_name(FFA_MEM_FRAG_RX));
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ handle = ffa_frag_handle(ffa_ret);
+
+ if (handle == FFA_MEMORY_HANDLE_INVALID) {
+ ERROR("SPMC returned an invalid handle for the operation.\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Prepare the next fragment for the operation. */
+ remaining_constituent_count = ffa_memory_fragment_init(
+ mb.send, PAGE_SIZE, &constituents[1], 1, &fragment_size);
+
+ ffa_ret = ffa_mem_frag_tx(handle, fragment_size);
+
+ if (is_ffa_call_error(ffa_ret)) {
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /*
+ * Request the hypervisor retrieve request.
+ * Response should be fragmented.
+ */
+ ffa_hypervisor_retrieve_request_init(mb.send, handle);
+ ffa_ret = ffa_mem_retrieve_req(sizeof(struct ffa_memory_region),
+ sizeof(struct ffa_memory_region));
+
+ if (ffa_func_id(ffa_ret) != FFA_MEM_RETRIEVE_RESP) {
+ ERROR("%s: couldn't retrieve the memory page. Error: %d\n",
+ __func__, ffa_error_code(ffa_ret));
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ total_size = ffa_mem_retrieve_res_total_size(ffa_ret);
+ fragment_size = ffa_mem_retrieve_res_frag_size(ffa_ret);
+ fragment_offset = fragment_size;
+
+ ret_rmm = host_rmi_granule_delegate((u_register_t)mb.recv);
+
+ if (ret_rmm != 0UL) {
+ INFO("Delegate operation returns 0x%lx for address %p\n",
+		     ret_rmm, mb.recv);
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ ffa_ret = ffa_rx_release();
+ if (is_ffa_call_error(ffa_ret)) {
+ ERROR("ffa_rx_release() failed.\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Call FFA_MEM_FRAG_RX but expect it to abort. */
+ ffa_ret = ffa_mem_frag_rx(handle, fragment_offset);
+
+ if (!is_expected_ffa_error(ffa_ret, FFA_ERROR_ABORTED)) {
+		ERROR("Expected FFA_MEM_FRAG_RX to have failed with "
+ "FFA_ERROR_ABORTED.\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Undelegate to reestablish the same security state for PAS. */
+ ret_rmm = host_rmi_granule_undelegate((u_register_t)mb.recv);
+ if (ret_rmm != 0UL) {
+ ERROR("Undelegate operation returns 0x%lx for address %llx\n",
+		      ret_rmm, (uint64_t)mb.recv);
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Continue the hypervisor retrieve request. */
+ if (!hypervisor_retrieve_request_continue(
+ &mb, handle, NULL, 0, total_size, fragment_offset, false)) {
+ ERROR("Failed to continue hypervisor retrieve request after"
+		      " re-establishing PAS.\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ /* Reclaim memory to be able to reuse it. */
+ ffa_ret = ffa_mem_reclaim(handle, 0);
+
+ if (is_ffa_call_error(ffa_ret)) {
+ ERROR("Failed to reclaim memory to be used in next test\n");
+ ret = TEST_RESULT_FAIL;
+ goto exit;
+ }
+
+ ret = TEST_RESULT_SUCCESS;
+
+exit:
+ unregister_custom_sync_exception_handler();
+
+ return ret;
+}
diff --git a/tftf/tests/tests-memory-access.xml b/tftf/tests/tests-memory-access.xml
index 200c5dd..fbc0e2e 100644
--- a/tftf/tests/tests-memory-access.xml
+++ b/tftf/tests/tests-memory-access.xml
@@ -83,6 +83,10 @@
function="test_ffa_hypervisor_retrieve_request_fail_tx_realm" />
<testcase name="FF-A Memory Relinquish, NWd TX buffer is in realm PAS"
function="test_ffa_memory_relinquish_fail_tx_realm" />
+ <testcase name="FF-A Memory Frag Tx, NWd TX buffer is in realm PAS"
+ function="test_ffa_memory_share_fragmented_tx_realm" />
+ <testcase name="FF-A Memory Frag Rx, NWd RX buffer is in realm PAS"
+ function="test_ffa_memory_share_fragmented_rx_realm" />
</testsuite>
</testsuites>