refactor(memory share): reclaim from other world

The memory reclaim handling has been refactored to better differentiate
the SPMC's and the Hypervisor's implementations.
The functions that perform a memory reclaim targeting the other
world are moved to the plat/ffa module:

- The SPMC returns ffa_error(FFA_INVALID_PARAMETERS), as it considers
the received memory handle to be invalid.

- The Hypervisor delegates the tracking of the memory region to
the SPMC if an SP is involved, either as the sender or as
a receiver. As such, all functions performing a memory reclaim with
the other world (i.e. the secure world) that were defined in
`ffa_memory.c` have been moved to `plat/ffa/hypervisor.c`.
A few functions from `ffa_memory.c` were made public so they can
be invoked from `plat/ffa/hypervisor.c`. The resulting per-world
dispatch is sketched below.
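
A minimal sketch of the dispatch, abridged from the diff below (the
Hypervisor variant additionally fails early with FFA_INVALID_PARAMETERS
when ffa_tee_enabled is not set):

  /* SPMC: memory shared with the other world is tracked locally, so a
   * handle reaching this path is treated as invalid. */
  struct ffa_value plat_ffa_other_world_mem_reclaim(
          struct vm *to, ffa_memory_handle_t handle,
          ffa_memory_region_flags_t flags, struct mpool *page_pool)
  {
          return ffa_error(FFA_INVALID_PARAMETERS);
  }

  /* Hypervisor: lock the caller and the other-world VM, then drive the
   * reclaim against the SPMC via ffa_memory_other_world_reclaim(). */
  struct ffa_value plat_ffa_other_world_mem_reclaim(
          struct vm *to, ffa_memory_handle_t handle,
          ffa_memory_region_flags_t flags, struct mpool *page_pool)
  {
          struct vm *from = vm_find(HF_TEE_VM_ID);
          struct two_vm_locked lock = vm_lock_both(to, from);
          struct ffa_value ret = ffa_memory_other_world_reclaim(
                  lock.vm1, lock.vm2, handle, flags, page_pool);

          vm_unlock(&lock.vm1);
          vm_unlock(&lock.vm2);
          return ret;
  }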

Change-Id: Idff0dae89ca8b6c99d33632bb4401decc368b7e5
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index bec4314..224f61a 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -322,3 +322,11 @@
 	struct vm *from, uint32_t share_func,
 	struct ffa_memory_region **memory_region, uint32_t length,
 	uint32_t fragment_length, struct mpool *page_pool);
+
+/**
+ * Handles a memory reclaim request when the given memory handle was allocated
+ * by the other world.
+ */
+struct ffa_value plat_ffa_other_world_mem_reclaim(
+	struct vm *to, ffa_memory_handle_t handle,
+	ffa_memory_region_flags_t flags, struct mpool *page_pool);
diff --git a/inc/hf/ffa_memory.h b/inc/hf/ffa_memory.h
index 7d0680a..1fccfbf 100644
--- a/inc/hf/ffa_memory.h
+++ b/inc/hf/ffa_memory.h
@@ -42,8 +42,3 @@
 				    ffa_memory_handle_t handle,
 				    ffa_memory_region_flags_t flags,
 				    struct mpool *page_pool);
-struct ffa_value ffa_memory_other_world_reclaim(struct vm_locked to_locked,
-						struct vm_locked from_locked,
-						ffa_memory_handle_t handle,
-						ffa_memory_region_flags_t flags,
-						struct mpool *page_pool);
diff --git a/inc/hf/ffa_memory_internal.h b/inc/hf/ffa_memory_internal.h
index 08f7820..2da68f1 100644
--- a/inc/hf/ffa_memory_internal.h
+++ b/inc/hf/ffa_memory_internal.h
@@ -141,3 +141,8 @@
 	struct ffa_memory_region_constituent **fragments,
 	const uint32_t *fragment_constituent_counts, uint32_t fragment_count,
 	uint32_t mode, struct mpool *ppool, bool commit);
+struct ffa_value ffa_retrieve_check_transition(
+	struct vm_locked to, uint32_t share_func,
+	struct ffa_memory_region_constituent **fragments,
+	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
+	uint32_t memory_to_attributes, uint32_t *to_mode);
diff --git a/src/api.c b/src/api.c
index 8421449..cb6f779 100644
--- a/src/api.c
+++ b/src/api.c
@@ -3071,15 +3071,8 @@
 
 		vm_unlock(&to_locked);
 	} else {
-		struct vm *from = vm_find(HF_TEE_VM_ID);
-		struct two_vm_locked vm_to_from_lock = vm_lock_both(to, from);
-
-		ret = ffa_memory_other_world_reclaim(
-			vm_to_from_lock.vm1, vm_to_from_lock.vm2, handle, flags,
-			&api_page_pool);
-
-		vm_unlock(&vm_to_from_lock.vm1);
-		vm_unlock(&vm_to_from_lock.vm2);
+		ret = plat_ffa_other_world_mem_reclaim(to, handle, flags,
+						       &api_page_pool);
 	}
 
 	return ret;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 7c20ca7..244da47 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -511,3 +511,15 @@
 
 	return (struct ffa_value){.func = FFA_ERROR_32};
 }
+
+struct ffa_value plat_ffa_other_world_mem_reclaim(
+	struct vm *to, ffa_memory_handle_t handle,
+	ffa_memory_region_flags_t flags, struct mpool *page_pool)
+{
+	(void)handle;
+	(void)flags;
+	(void)page_pool;
+	(void)to;
+
+	return ffa_error(FFA_INVALID_PARAMETERS);
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index c41f7c9..e8a9fe6 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -30,6 +30,14 @@
 alignas(FFA_PAGE_SIZE) static uint8_t other_world_send_buffer[HF_MAILBOX_SIZE];
 alignas(FFA_PAGE_SIZE) static uint8_t other_world_recv_buffer[HF_MAILBOX_SIZE];
 
+/**
+ * Buffer for retrieving memory region information from the other world for when
+ * a region is reclaimed by a VM. Access to this buffer must be guarded by the
+ * VM lock of the other world VM.
+ */
+alignas(PAGE_SIZE) static uint8_t
+	other_world_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];
+
 /** Returns information on features specific to the NWd. */
 struct ffa_value plat_ffa_features(uint32_t function_feature_id)
 {
@@ -1233,3 +1241,297 @@
 
 	return ret;
 }
+
+/**
+ * Reclaims the given memory from the other world. To do this, space is first
+ * reserved in the <to> VM's page table, then the reclaim request is sent on to
+ * the other world. Then (if that is successful) the memory is mapped back into
+ * the <to> VM's page table.
+ *
+ * This function requires the calling context to hold the <to> lock.
+ *
+ * Returns:
+ *  In case of error, one of the following values is returned:
+ *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
+ *     erroneous;
+ *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
+ *     the request.
+ *  Success is indicated by FFA_SUCCESS.
+ */
+static struct ffa_value ffa_other_world_reclaim_check_update(
+	struct vm_locked to_locked, ffa_memory_handle_t handle,
+	struct ffa_memory_region_constituent *constituents,
+	uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
+	struct mpool *page_pool)
+{
+	uint32_t to_mode;
+	struct mpool local_page_pool;
+	struct ffa_value ret;
+	ffa_memory_region_flags_t other_world_flags;
+
+	/*
+	 * Make sure constituents are properly aligned to a 64-bit boundary. If
+	 * not we would get alignment faults trying to read (64-bit) values.
+	 */
+	if (!is_aligned(constituents, 8)) {
+		dlog_verbose("Constituents not aligned.\n");
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Check if the state transition is lawful for the recipient, and ensure
+	 * that all constituents of the memory region being retrieved are at the
+	 * same state.
+	 */
+	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
+					    &constituents, &constituent_count,
+					    1, memory_to_attributes, &to_mode);
+	if (ret.func != FFA_SUCCESS_32) {
+		dlog_verbose("Invalid transition.\n");
+		return ret;
+	}
+
+	/*
+	 * Create a local pool so any freed memory can't be used by another
+	 * thread. This is to ensure the original mapping can be restored if the
+	 * clear fails.
+	 */
+	mpool_init_with_fallback(&local_page_pool, page_pool);
+
+	/*
+	 * First reserve all required memory for the new page table entries in
+	 * the recipient page tables without committing, to make sure the entire
+	 * operation will succeed without exhausting the page pool.
+	 */
+	if (!ffa_region_group_identity_map(to_locked, &constituents,
+					   &constituent_count, 1, to_mode,
+					   page_pool, false)) {
+		/* TODO: partial defrag of failed range. */
+		dlog_verbose(
+			"Insufficient memory to update recipient page "
+			"table.\n");
+		ret = ffa_error(FFA_NO_MEMORY);
+		goto out;
+	}
+
+	/*
+	 * Forward the reclaim request to the other world and check its result.
+	 */
+	other_world_flags = 0;
+	if (clear) {
+		other_world_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
+	}
+	ret = arch_other_world_call(
+		(struct ffa_value){.func = FFA_MEM_RECLAIM_32,
+				   .arg1 = (uint32_t)handle,
+				   .arg2 = (uint32_t)(handle >> 32),
+				   .arg3 = other_world_flags});
+
+	if (ret.func != FFA_SUCCESS_32) {
+		dlog_verbose(
+			"Got %#x (%d) from other world in response to "
+			"FFA_MEM_RECLAIM, "
+			"expected FFA_SUCCESS.\n",
+			ret.func, ret.arg2);
+		goto out;
+	}
+
+	/*
+	 * The other world was happy with it, so complete the reclaim by mapping
+	 * the memory into the recipient. This won't allocate because the
+	 * transaction was already prepared above, so it doesn't need to use the
+	 * `local_page_pool`.
+	 */
+	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
+					    &constituent_count, 1, to_mode,
+					    page_pool, true));
+
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+
+out:
+	mpool_fini(&local_page_pool);
+
+	/*
+	 * Tidy up the page table by reclaiming failed mappings (if there was an
+	 * error) or merging entries into blocks where possible (on success).
+	 */
+	vm_ptable_defrag(to_locked, page_pool);
+
+	return ret;
+}
+
+/**
+ * Validates that the reclaim transition is allowed for the memory region with
+ * the given handle, which was previously shared with the other world. Tells
+ * the other world to mark it as reclaimed, and updates the page table of the
+ * reclaiming VM.
+ *
+ * To do this, information about the memory region is first fetched from the
+ * other world.
+ */
+static struct ffa_value ffa_memory_other_world_reclaim(
+	struct vm_locked to_locked, struct vm_locked from_locked,
+	ffa_memory_handle_t handle, ffa_memory_region_flags_t flags,
+	struct mpool *page_pool)
+{
+	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
+		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
+	struct ffa_value other_world_ret;
+	uint32_t length;
+	uint32_t fragment_length;
+	uint32_t fragment_offset;
+	struct ffa_memory_region *memory_region;
+	struct ffa_composite_memory_region *composite;
+	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+
+	CHECK(request_length <= HF_MAILBOX_SIZE);
+	CHECK(from_locked.vm->id == HF_OTHER_WORLD_ID);
+
+	/* Retrieve memory region information from the other world. */
+	other_world_ret = arch_other_world_call(
+		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
+				   .arg1 = request_length,
+				   .arg2 = request_length});
+	if (other_world_ret.func == FFA_ERROR_32) {
+		dlog_verbose("Got error %d from EL3.\n", other_world_ret.arg2);
+		return other_world_ret;
+	}
+	if (other_world_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
+		dlog_verbose(
+			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
+			other_world_ret.func);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	length = other_world_ret.arg1;
+	fragment_length = other_world_ret.arg2;
+
+	if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
+	    length > sizeof(other_world_retrieve_buffer)) {
+		dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
+			     fragment_length, length, HF_MAILBOX_SIZE,
+			     sizeof(other_world_retrieve_buffer));
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/*
+	 * Copy the first fragment of the memory region descriptor to an
+	 * internal buffer.
+	 */
+	memcpy_s(other_world_retrieve_buffer,
+		 sizeof(other_world_retrieve_buffer),
+		 from_locked.vm->mailbox.send, fragment_length);
+
+	/* Fetch the remaining fragments into the same buffer. */
+	fragment_offset = fragment_length;
+	while (fragment_offset < length) {
+		other_world_ret = arch_other_world_call(
+			(struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
+					   .arg1 = (uint32_t)handle,
+					   .arg2 = (uint32_t)(handle >> 32),
+					   .arg3 = fragment_offset});
+		if (other_world_ret.func != FFA_MEM_FRAG_TX_32) {
+			dlog_verbose(
+				"Got %#x (%d) from other world in response to "
+				"FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
+				other_world_ret.func, other_world_ret.arg2);
+			return other_world_ret;
+		}
+		if (ffa_frag_handle(other_world_ret) != handle) {
+			dlog_verbose(
+				"Got FFA_MEM_FRAG_TX for unexpected handle %#x "
+				"in response to FFA_MEM_FRAG_RX for handle "
+				"%#x.\n",
+				ffa_frag_handle(other_world_ret), handle);
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
+		if (ffa_frag_sender(other_world_ret) != 0) {
+			dlog_verbose(
+				"Got FFA_MEM_FRAG_TX with unexpected sender %d "
+				"(expected 0).\n",
+				ffa_frag_sender(other_world_ret));
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
+		fragment_length = other_world_ret.arg3;
+		if (fragment_length > HF_MAILBOX_SIZE ||
+		    fragment_offset + fragment_length > length) {
+			dlog_verbose(
+				"Invalid fragment length %d at offset %d (max "
+				"%d).\n",
+				fragment_length, fragment_offset,
+				HF_MAILBOX_SIZE);
+			return ffa_error(FFA_INVALID_PARAMETERS);
+		}
+		memcpy_s(other_world_retrieve_buffer + fragment_offset,
+			 sizeof(other_world_retrieve_buffer) - fragment_offset,
+			 from_locked.vm->mailbox.send, fragment_length);
+
+		fragment_offset += fragment_length;
+	}
+
+	memory_region = (struct ffa_memory_region *)other_world_retrieve_buffer;
+
+	if (memory_region->receiver_count != 1) {
+		/* Only one receiver supported by Hafnium for now. */
+		dlog_verbose(
+			"Multiple recipients not supported (got %d, expected "
+			"1).\n",
+			memory_region->receiver_count);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	if (memory_region->handle != handle) {
+		dlog_verbose(
+			"Got memory region handle %#x from other world but "
+			"requested handle %#x.\n",
+			memory_region->handle, handle);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	/* The original sender must match the caller. */
+	if (to_locked.vm->id != memory_region->sender) {
+		dlog_verbose(
+			"VM %#x attempted to reclaim memory handle %#x "
+			"originally sent by VM %#x.\n",
+			to_locked.vm->id, handle, memory_region->sender);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	composite = ffa_memory_region_get_composite(memory_region, 0);
+
+	/*
+	 * Validate that the reclaim transition is allowed for the given memory
+	 * region, forward the request to the other world and then map the
+	 * memory back into the caller's stage-2 page table.
+	 */
+	return ffa_other_world_reclaim_check_update(
+		to_locked, handle, composite->constituents,
+		composite->constituent_count, memory_to_attributes,
+		flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
+}
+
+struct ffa_value plat_ffa_other_world_mem_reclaim(
+	struct vm *to, ffa_memory_handle_t handle,
+	ffa_memory_region_flags_t flags, struct mpool *page_pool)
+{
+	struct ffa_value ret;
+	struct vm *from = vm_find(HF_TEE_VM_ID);
+	struct two_vm_locked vm_to_from_lock;
+
+	if (!ffa_tee_enabled) {
+		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
+			     handle);
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	vm_to_from_lock = vm_lock_both(to, from);
+
+	ret = ffa_memory_other_world_reclaim(vm_to_from_lock.vm1,
+					     vm_to_from_lock.vm2, handle, flags,
+					     page_pool);
+
+	vm_unlock(&vm_to_from_lock.vm1);
+	vm_unlock(&vm_to_from_lock.vm2);
+
+	return ret;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index ef3da24..18bb84b 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -2384,3 +2384,20 @@
 
 	return ret;
 }
+
+/**
+ * The SPMC tracks its memory share operations internally, so a reclaim of a
+ * handle from the other world is invalid and no forwarding is required.
+ */
+struct ffa_value plat_ffa_other_world_mem_reclaim(
+	struct vm *to, ffa_memory_handle_t handle,
+	ffa_memory_region_flags_t flags, struct mpool *page_pool)
+{
+	(void)handle;
+	(void)flags;
+	(void)page_pool;
+	(void)to;
+
+	dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n", handle);
+	return ffa_error(FFA_INVALID_PARAMETERS);
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 2d2b89a..140b0e4 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -486,3 +486,15 @@
 
 	return (struct ffa_value){0};
 }
+
+struct ffa_value plat_ffa_other_world_mem_reclaim(
+	struct vm *to, ffa_memory_handle_t handle,
+	ffa_memory_region_flags_t flags, struct mpool *page_pool)
+{
+	(void)handle;
+	(void)flags;
+	(void)page_pool;
+	(void)to;
+
+	return ffa_error(FFA_INVALID_PARAMETERS);
+}
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index 7df1491..fd3030b 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -30,14 +30,6 @@
 static struct ffa_memory_share_state share_states[MAX_MEM_SHARES];
 
 /**
- * Buffer for retrieving memory region information from the other world for when
- * a region is reclaimed by a VM. Access to this buffer must be guarded by the
- * VM lock of the other world VM.
- */
-alignas(PAGE_SIZE) static uint8_t
-	other_world_retrieve_buffer[HF_MAILBOX_SIZE * MAX_FRAGMENTS];
-
-/**
  * Extracts the index from a memory handle allocated by Hafnium's current world.
  */
 uint64_t ffa_memory_handle_get_index(ffa_memory_handle_t handle)
@@ -569,7 +561,7 @@
  *   4) FFA_INVALID_PARAMETERS if the requested share type was not handled.
  *  Or FFA_SUCCESS on success.
  */
-static struct ffa_value ffa_retrieve_check_transition(
+struct ffa_value ffa_retrieve_check_transition(
 	struct vm_locked to, uint32_t share_func,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
@@ -1016,126 +1008,6 @@
 	return ret;
 }
 
-/**
- * Reclaims the given memory from the other world. To do this space is first
- * reserved in the <to> VM's page table, then the reclaim request is sent on to
- * the other world. then (if that is successful) the memory is mapped back into
- * the <to> VM's page table.
- *
- * This function requires the calling context to hold the <to> lock.
- *
- * Returns:
- *  In case of error, one of the following values is returned:
- *   1) FFA_INVALID_PARAMETERS - The endpoint provided parameters were
- *     erroneous;
- *   2) FFA_NO_MEMORY - Hafnium did not have sufficient memory to complete
- *     the request.
- *  Success is indicated by FFA_SUCCESS.
- */
-static struct ffa_value ffa_other_world_reclaim_check_update(
-	struct vm_locked to_locked, ffa_memory_handle_t handle,
-	struct ffa_memory_region_constituent *constituents,
-	uint32_t constituent_count, uint32_t memory_to_attributes, bool clear,
-	struct mpool *page_pool)
-{
-	uint32_t to_mode;
-	struct mpool local_page_pool;
-	struct ffa_value ret;
-	ffa_memory_region_flags_t other_world_flags;
-
-	/*
-	 * Make sure constituents are properly aligned to a 64-bit boundary. If
-	 * not we would get alignment faults trying to read (64-bit) values.
-	 */
-	if (!is_aligned(constituents, 8)) {
-		dlog_verbose("Constituents not aligned.\n");
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	/*
-	 * Check if the state transition is lawful for the recipient, and ensure
-	 * that all constituents of the memory region being retrieved are at the
-	 * same state.
-	 */
-	ret = ffa_retrieve_check_transition(to_locked, FFA_MEM_RECLAIM_32,
-					    &constituents, &constituent_count,
-					    1, memory_to_attributes, &to_mode);
-	if (ret.func != FFA_SUCCESS_32) {
-		dlog_verbose("Invalid transition.\n");
-		return ret;
-	}
-
-	/*
-	 * Create a local pool so any freed memory can't be used by another
-	 * thread. This is to ensure the original mapping can be restored if the
-	 * clear fails.
-	 */
-	mpool_init_with_fallback(&local_page_pool, page_pool);
-
-	/*
-	 * First reserve all required memory for the new page table entries in
-	 * the recipient page tables without committing, to make sure the entire
-	 * operation will succeed without exhausting the page pool.
-	 */
-	if (!ffa_region_group_identity_map(to_locked, &constituents,
-					   &constituent_count, 1, to_mode,
-					   page_pool, false)) {
-		/* TODO: partial defrag of failed range. */
-		dlog_verbose(
-			"Insufficient memory to update recipient page "
-			"table.\n");
-		ret = ffa_error(FFA_NO_MEMORY);
-		goto out;
-	}
-
-	/*
-	 * Forward the request to the other world, check if SPMC returned
-	 * FFA_SUCCESS_32. If not, terminate and return the error to caller
-	 * VM.
-	 */
-	other_world_flags = 0;
-	if (clear) {
-		other_world_flags |= FFA_MEMORY_REGION_FLAG_CLEAR;
-	}
-	ret = arch_other_world_call(
-		(struct ffa_value){.func = FFA_MEM_RECLAIM_32,
-				   .arg1 = (uint32_t)handle,
-				   .arg2 = (uint32_t)(handle >> 32),
-				   .arg3 = other_world_flags});
-
-	if (ret.func != FFA_SUCCESS_32) {
-		dlog_verbose(
-			"Got %#x (%d) from other world in response to "
-			"FFA_MEM_RECLAIM, "
-			"expected FFA_SUCCESS.\n",
-			ret.func, ret.arg2);
-		goto out;
-	}
-
-	/*
-	 * The other world was happy with it, so complete the reclaim by mapping
-	 * the memory into the recipient. This won't allocate because the
-	 * transaction was already prepared above, so it doesn't need to use the
-	 * `local_page_pool`.
-	 */
-	CHECK(ffa_region_group_identity_map(to_locked, &constituents,
-					    &constituent_count, 1, to_mode,
-					    page_pool, true));
-
-	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
-
-out:
-	mpool_fini(&local_page_pool);
-
-	/*
-	 * Tidy up the page table by reclaiming failed mappings (if there was an
-	 * error) or merging entries into blocks where possible (on success).
-	 */
-	vm_ptable_defrag(to_locked, page_pool);
-
-	return ret;
-}
-
 static struct ffa_value ffa_relinquish_check_update(
 	struct vm_locked from_locked,
 	struct ffa_memory_region_constituent **fragments,
@@ -2820,155 +2692,3 @@
 	share_states_unlock(&share_states);
 	return ret;
 }
-
-/**
- * Validates that the reclaim transition is allowed for the memory
- * region with the given handle which was previously shared with the
- * other world. Tells the other world to mark it as reclaimed, and
- * updates the page table of the reclaiming VM.
- *
- * To do this information about the memory region is first fetched from
- * the other world.
- */
-struct ffa_value ffa_memory_other_world_reclaim(struct vm_locked to_locked,
-						struct vm_locked from_locked,
-						ffa_memory_handle_t handle,
-						ffa_memory_region_flags_t flags,
-						struct mpool *page_pool)
-{
-	uint32_t request_length = ffa_memory_lender_retrieve_request_init(
-		from_locked.vm->mailbox.recv, handle, to_locked.vm->id);
-	struct ffa_value other_world_ret;
-	uint32_t length;
-	uint32_t fragment_length;
-	uint32_t fragment_offset;
-	struct ffa_memory_region *memory_region;
-	struct ffa_composite_memory_region *composite;
-	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
-
-	CHECK(request_length <= HF_MAILBOX_SIZE);
-	CHECK(from_locked.vm->id == HF_OTHER_WORLD_ID);
-
-	/* Retrieve memory region information from the other world. */
-	other_world_ret = arch_other_world_call(
-		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
-				   .arg1 = request_length,
-				   .arg2 = request_length});
-	if (other_world_ret.func == FFA_ERROR_32) {
-		dlog_verbose("Got error %d from EL3.\n", other_world_ret.arg2);
-		return other_world_ret;
-	}
-	if (other_world_ret.func != FFA_MEM_RETRIEVE_RESP_32) {
-		dlog_verbose(
-			"Got %#x from EL3, expected FFA_MEM_RETRIEVE_RESP.\n",
-			other_world_ret.func);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	length = other_world_ret.arg1;
-	fragment_length = other_world_ret.arg2;
-
-	if (fragment_length > HF_MAILBOX_SIZE || fragment_length > length ||
-	    length > sizeof(other_world_retrieve_buffer)) {
-		dlog_verbose("Invalid fragment length %d/%d (max %d/%d).\n",
-			     fragment_length, length, HF_MAILBOX_SIZE,
-			     sizeof(other_world_retrieve_buffer));
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	/*
-	 * Copy the first fragment of the memory region descriptor to an
-	 * internal buffer.
-	 */
-	memcpy_s(other_world_retrieve_buffer,
-		 sizeof(other_world_retrieve_buffer),
-		 from_locked.vm->mailbox.send, fragment_length);
-
-	/* Fetch the remaining fragments into the same buffer. */
-	fragment_offset = fragment_length;
-	while (fragment_offset < length) {
-		other_world_ret = arch_other_world_call(
-			(struct ffa_value){.func = FFA_MEM_FRAG_RX_32,
-					   .arg1 = (uint32_t)handle,
-					   .arg2 = (uint32_t)(handle >> 32),
-					   .arg3 = fragment_offset});
-		if (other_world_ret.func != FFA_MEM_FRAG_TX_32) {
-			dlog_verbose(
-				"Got %#x (%d) from other world in response to "
-				"FFA_MEM_FRAG_RX, expected FFA_MEM_FRAG_TX.\n",
-				other_world_ret.func, other_world_ret.arg2);
-			return other_world_ret;
-		}
-		if (ffa_frag_handle(other_world_ret) != handle) {
-			dlog_verbose(
-				"Got FFA_MEM_FRAG_TX for unexpected handle %#x "
-				"in response to FFA_MEM_FRAG_RX for handle "
-				"%#x.\n",
-				ffa_frag_handle(other_world_ret), handle);
-			return ffa_error(FFA_INVALID_PARAMETERS);
-		}
-		if (ffa_frag_sender(other_world_ret) != 0) {
-			dlog_verbose(
-				"Got FFA_MEM_FRAG_TX with unexpected sender %d "
-				"(expected 0).\n",
-				ffa_frag_sender(other_world_ret));
-			return ffa_error(FFA_INVALID_PARAMETERS);
-		}
-		fragment_length = other_world_ret.arg3;
-		if (fragment_length > HF_MAILBOX_SIZE ||
-		    fragment_offset + fragment_length > length) {
-			dlog_verbose(
-				"Invalid fragment length %d at offset %d (max "
-				"%d).\n",
-				fragment_length, fragment_offset,
-				HF_MAILBOX_SIZE);
-			return ffa_error(FFA_INVALID_PARAMETERS);
-		}
-		memcpy_s(other_world_retrieve_buffer + fragment_offset,
-			 sizeof(other_world_retrieve_buffer) - fragment_offset,
-			 from_locked.vm->mailbox.send, fragment_length);
-
-		fragment_offset += fragment_length;
-	}
-
-	memory_region = (struct ffa_memory_region *)other_world_retrieve_buffer;
-
-	if (memory_region->receiver_count != 1) {
-		/* Only one receiver supported by Hafnium for now. */
-		dlog_verbose(
-			"Multiple recipients not supported (got %d, "
-			"expected 1).\n",
-			memory_region->receiver_count);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	if (memory_region->handle != handle) {
-		dlog_verbose(
-			"Got memory region handle %#x from other world "
-			"but requested handle %#x.\n",
-			memory_region->handle, handle);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	/* The original sender must match the caller. */
-	if (to_locked.vm->id != memory_region->sender) {
-		dlog_verbose(
-			"VM %#x attempted to reclaim memory handle %#x "
-			"originally sent by VM %#x.\n",
-			to_locked.vm->id, handle, memory_region->sender);
-		return ffa_error(FFA_INVALID_PARAMETERS);
-	}
-
-	composite = ffa_memory_region_get_composite(memory_region, 0);
-
-	/*
-	 * Validate that the reclaim transition is allowed for the given
-	 * memory region, forward the request to the other world and
-	 * then map the memory back into the caller's stage-2 page
-	 * table.
-	 */
-	return ffa_other_world_reclaim_check_update(
-		to_locked, handle, composite->constituents,
-		composite->constituent_count, memory_to_attributes,
-		flags & FFA_MEM_RECLAIM_CLEAR, page_pool);
-}