feat(memory share): retrieve request from NWd

If the SPMC is the allocator of a given memory handle, it is
expected to keep track of the ongoing memory region operations.
When a memory region is shared with an SP, a VM in the normal
world may also attempt to retrieve that memory region.
The hypervisor issues a retrieve request to the SPMC so that the
SPMC passes back the recorded memory region descriptor. The
hypervisor can then validate the VM's retrieve request against it
and update the VM's stage-2 memory mapping.

The SPMC doesn't expect to receive a retrieve request whose memory
handle was allocated by the hypervisor. Return FFA_INVALID_PARAMETERS
in such a case.

Change-Id: Id86069cfb263c6a2efc4202ad9e3c5e53cdf1e65
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index a11fd1a..88d809e 100644
--- a/src/api.c
+++ b/src/api.c
@@ -3014,9 +3014,14 @@
 		goto out;
 	}
 
-	ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
-				  &api_page_pool);
-
+	if (plat_ffa_memory_handle_allocated_by_current_world(
+		    retrieve_request->handle)) {
+		ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
+					  &api_page_pool);
+	} else {
+		ret = plat_ffa_other_world_mem_retrieve(
+			to_locked, retrieve_request, length, &api_page_pool);
+	}
 out:
 	vm_unlock(&to_locked);
 	return ret;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index a07313c..144425d 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -541,3 +541,20 @@
 
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
+
+/*
+ * No other world exists in this configuration, so no memory handle can
+ * have been allocated by an other-world component; a retrieve request
+ * routed here can never be served.
+ */
+struct ffa_value plat_ffa_other_world_mem_retrieve(
+	struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
+	uint32_t length, struct mpool *page_pool)
+{
+	(void)to_locked;
+	(void)retrieve_request;
+	(void)length;
+	(void)page_pool;
+
+	return ffa_error(FFA_INVALID_PARAMETERS);
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 13767f7..2ae4610 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -308,6 +308,8 @@
 bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
 				  struct ffa_value *ret)
 {
+	struct ffa_value other_world_ret;
+
 	if (!ffa_tee_enabled) {
 		return true;
 	}
@@ -316,10 +318,14 @@
 		return true;
 	}
 
-	*ret = arch_other_world_call((struct ffa_value){
+	other_world_ret = arch_other_world_call((struct ffa_value){
 		.func = FFA_RX_ACQUIRE_32, .arg1 = to_locked.vm->id});
 
-	return ret->func == FFA_SUCCESS_32;
+	if (ret != NULL) {
+		*ret = other_world_ret;
+	}
+
+	return other_world_ret.func == FFA_SUCCESS_32;
 }
 
 bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
@@ -1594,3 +1600,124 @@
 
 	return ret;
 }
+
+struct ffa_value plat_ffa_other_world_mem_retrieve(
+	struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
+	uint32_t length, struct mpool *page_pool)
+{
+	struct ffa_memory_region_constituent *constituents = NULL;
+	struct ffa_value ret;
+	struct ffa_memory_region *memory_region;
+	struct vm *from;
+	struct vm_locked from_locked;
+	struct ffa_composite_memory_region *composite;
+	uint32_t receiver_index;
+	uint32_t fragment_length;
+	uint32_t share_func;
+	ffa_memory_region_flags_t transaction_type;
+
+	/*
+	 * TODO: Is there a way to retrieve the sender's original attributes
+	 * if it is an SP? Such that a receiver VM does not get more privilege
+	 * than a sender SP.
+	 */
+	uint32_t memory_to_attributes = MM_MODE_R | MM_MODE_W | MM_MODE_X;
+
+	assert(length <= HF_MAILBOX_SIZE);
+
+	if (!ffa_tee_enabled) {
+		dlog_verbose("There isn't a TEE in the system.\n");
+		return ffa_error(FFA_INVALID_PARAMETERS);
+	}
+
+	from = vm_find(HF_TEE_VM_ID);
+	assert(from != NULL);
+	from_locked = vm_lock(from);
+
+	/* Copy retrieve request to the SPMC's RX buffer. */
+	memcpy_s(from->mailbox.recv, HF_MAILBOX_SIZE, retrieve_request, length);
+
+	ret = arch_other_world_call(
+		(struct ffa_value){.func = FFA_MEM_RETRIEVE_REQ_32,
+				   .arg1 = length,
+				   .arg2 = length});
+	if (ret.func == FFA_ERROR_32) {
+		dlog_verbose(
+			"Failed to forward FFA_MEM_RETRIEVE_REQ to the SPMC.\n");
+		goto out;
+	}
+
+	/* Check we received a valid memory retrieve response from the SPMC. */
+	CHECK(ret.func == FFA_MEM_RETRIEVE_RESP_32);
+
+	fragment_length = ret.arg2;
+	CHECK(fragment_length >=
+	      sizeof(struct ffa_memory_region) +
+		      sizeof(struct ffa_memory_access) +
+		      sizeof(struct ffa_composite_memory_region));
+	CHECK(fragment_length <= sizeof(other_world_retrieve_buffer));
+
+	/* Retrieve the retrieve response from SPMC. */
+	memcpy_s(other_world_retrieve_buffer,
+		 sizeof(other_world_retrieve_buffer),
+		 from_locked.vm->mailbox.send, fragment_length);
+
+	memory_region = (struct ffa_memory_region *)other_world_retrieve_buffer;
+
+	if (retrieve_request->sender != memory_region->sender) {
+		dlog_verbose(
+			"Retrieve request doesn't match the received memory "
+			"region from SPMC.\n");
+		ret = ffa_error(FFA_INVALID_PARAMETERS);
+		goto out;
+	}
+
+	receiver_index =
+		ffa_memory_region_get_receiver(memory_region, to_locked.vm->id);
+	CHECK(receiver_index == 0);
+
+	composite =
+		ffa_memory_region_get_composite(memory_region, receiver_index);
+	constituents = &composite->constituents[0];
+
+	/* Get the share func ID from the transaction type flag. */
+	transaction_type =
+		memory_region->flags & FFA_MEMORY_REGION_TRANSACTION_TYPE_MASK;
+	switch (transaction_type) {
+	case FFA_MEMORY_REGION_TRANSACTION_TYPE_SHARE:
+		share_func = FFA_MEM_SHARE_32;
+		break;
+	case FFA_MEMORY_REGION_TRANSACTION_TYPE_LEND:
+		share_func = FFA_MEM_LEND_32;
+		break;
+	case FFA_MEMORY_REGION_TRANSACTION_TYPE_DONATE:
+		share_func = FFA_MEM_DONATE_32;
+		break;
+	case FFA_MEMORY_REGION_TRANSACTION_TYPE_UNSPECIFIED:
+	default:
+		panic("Invalid transaction in memory region flag: %x\n",
+		      transaction_type);
+	}
+
+	CHECK(ffa_retrieve_check_update(
+		      to_locked, memory_region->sender, &constituents,
+		      &composite->constituent_count, 1, memory_to_attributes,
+		      share_func, false, page_pool)
+		      .func == FFA_SUCCESS_32);
+
+	/* Acquire RX buffer from the SPMC and copy the retrieve response. */
+	CHECK(plat_ffa_acquire_receiver_rx(to_locked, NULL));
+	memcpy_s(to_locked.vm->mailbox.recv, HF_MAILBOX_SIZE, memory_region,
+		 fragment_length);
+
+	to_locked.vm->mailbox.recv_size = fragment_length;
+	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
+	to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
+	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
+
+out:
+	vm_unlock(&from_locked);
+
+	/* Return ret as received from the SPMC. */
+	return ret;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index 375f0a1..15b9cf4 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -2572,3 +2572,20 @@
 
 	plat_ffa_enable_virtual_maintenance_interrupts(current_locked);
 }
+
+/*
+ * The SPMC only tracks handles it allocated itself; a handle allocated
+ * by the hypervisor (the other world) is unknown here, so the retrieve
+ * request cannot be served.
+ */
+struct ffa_value plat_ffa_other_world_mem_retrieve(
+	struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
+	uint32_t length, struct mpool *page_pool)
+{
+	(void)to_locked;
+	(void)retrieve_request;
+	(void)length;
+	(void)page_pool;
+
+	return ffa_error(FFA_INVALID_PARAMETERS);
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index fcb4bf7..e6045be 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -516,3 +516,20 @@
 
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
+
+/*
+ * Fake platform used by unit tests: there is no other world, so a
+ * retrieve request for an other-world-allocated handle always fails
+ * with FFA_INVALID_PARAMETERS.
+ */
+struct ffa_value plat_ffa_other_world_mem_retrieve(
+	struct vm_locked to_locked, struct ffa_memory_region *retrieve_request,
+	uint32_t length, struct mpool *page_pool)
+{
+	(void)to_locked;
+	(void)retrieve_request;
+	(void)length;
+	(void)page_pool;
+
+	return ffa_error(FFA_INVALID_PARAMETERS);
+}
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index 93dcce6..4e0f684 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -918,7 +918,7 @@
  *     the request.
  *  Success is indicated by FFA_SUCCESS.
  */
-static struct ffa_value ffa_retrieve_check_update(
+struct ffa_value ffa_retrieve_check_update(
 	struct vm_locked to_locked, ffa_vm_id_t from_id,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
@@ -936,6 +936,7 @@
 	 */
 	for (i = 0; i < fragment_count; ++i) {
 		if (!is_aligned(fragments[i], 8)) {
+			dlog_verbose("Fragment not properly aligned.\n");
 			return ffa_error(FFA_INVALID_PARAMETERS);
 		}
 	}
@@ -981,6 +982,7 @@
 	    !ffa_clear_memory_constituents(
 		    plat_ffa_owner_world_mode(from_id), fragments,
 		    fragment_constituent_counts, fragment_count, page_pool)) {
+		dlog_verbose("Couldn't clear constituents.\n");
 		ret = ffa_error(FFA_NO_MEMORY);
 		goto out;
 	}
@@ -1860,8 +1862,8 @@
  * returns its index in the receiver's array. If receiver's ID doesn't exist
  * in the array, return the region's 'receiver_count'.
  */
-static uint32_t ffa_memory_region_get_receiver(
-	struct ffa_memory_region *memory_region, ffa_vm_id_t receiver)
+uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
+					ffa_vm_id_t receiver)
 {
 	struct ffa_memory_access *receivers;
 	uint32_t i;
@@ -2292,6 +2294,43 @@
 						     to_locked)) {
 		uint32_t receiver_index;
 
+		/*
+		 * The SPMC can only process retrieve requests to memory share
+		 * operations with one borrower from the other world. It can't
+		 * determine the ID of the NWd VM that invoked the retrieve
+		 * request interface call. It relies on the hypervisor to
+		 * validate the caller's ID against that provided in the
+		 * `receivers` list of the retrieve response.
+		 * In case there is only one borrower from the NWd in the
+		 * transaction descriptor, record that in the `receiver_id` for
+		 * later use, and validate in the retrieve request message.
+		 */
+		if (to_locked.vm->id == HF_HYPERVISOR_VM_ID) {
+			uint32_t other_world_count = 0;
+			for (uint32_t i = 0; i < memory_region->receiver_count;
+			     i++) {
+				ffa_vm_id_t id =
+					memory_region->receivers[i]
+						.receiver_permissions.receiver;
+				/* Record the NWd borrower for validation. */
+				if (!vm_id_is_current_world(id)) {
+					receiver_id = id;
+					other_world_count++;
+				}
+			}
+			if (other_world_count > 1) {
+				dlog_verbose(
+					"Support one receiver from the other "
+					"world.\n");
+				return ffa_error(FFA_NOT_SUPPORTED);
+			}
+		}
+
+		/*
+		 * Validate retrieve request, according to what was sent by the
+		 * sender. Function will output the `receiver_index` from the
+		 * provided memory region, and will output `permissions` from
+		 * the validated requested permissions.
+		 */
 		ret = ffa_memory_retrieve_validate(
 			receiver_id, retrieve_request, memory_region,
 			&receiver_index, share_state->share_func);
@@ -2335,8 +2374,7 @@
 	} else {
 		if (share_state->hypervisor_fragment_count != 0U) {
 			dlog_verbose(
-				"Memory with handle %#x already "
-				"retrieved by "
+				"Memory with handle %#x already retrieved by "
 				"the hypervisor.\n",
 				handle);
 			ret = ffa_error(FFA_DENIED);
@@ -2348,6 +2386,9 @@
 		ffa_memory_retrieve_complete_from_hyp(share_state);
 	}
 
+	/* VMs acquire the RX buffer from SPMC. */
+	CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
+
 	/*
 	 * Copy response to RX buffer of caller and deliver the message.
 	 * This must be done before the share_state is (possibly) freed.
@@ -2369,6 +2410,7 @@
 		share_state->fragments[0],
 		share_state->fragment_constituent_counts[0], &total_length,
 		&fragment_length));
+
 	to_locked.vm->mailbox.recv_size = fragment_length;
 	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
 	to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
@@ -2633,14 +2675,15 @@
 	dump_share_states();
 
 	share_states = share_states_lock();
-	if (!get_share_state(share_states, handle, &share_state)) {
+	if (get_share_state(share_states, handle, &share_state)) {
+		memory_region = share_state->memory_region;
+	} else {
 		dlog_verbose("Invalid handle %#x for FFA_MEM_RECLAIM.\n",
 			     handle);
 		ret = ffa_error(FFA_INVALID_PARAMETERS);
 		goto out;
 	}
 
-	memory_region = share_state->memory_region;
 	CHECK(memory_region != NULL);
 
 	if (vm_id_is_current_world(to_locked.vm->id) &&