refactor(indirect message): mailbox state as per FF-A v1.1

The mailbox_state type was refactored to track RX buffer
ownership as defined by the FF-A v1.1 specification, and the
code was changed to reflect the new state transitions.

Kept compatibility with FF-A v1.0 indirect messaging to avoid
breaking legacy tests.
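
A minimal sketch of the new state model (the state names match the
diff below; the ownership comments are illustrative, based on the
FF-A v1.1 RX buffer ownership rules):

    enum mailbox_state {
            /* Empty: the partition manager owns the RX buffer and
             * may write the next message into it. */
            MAILBOX_STATE_EMPTY,

            /* A message was written: the receiver endpoint owns the
             * RX buffer and returns it with FFA_RX_RELEASE. */
            MAILBOX_STATE_FULL,

            /* Ownership was handed to the other world (e.g. the SPMC
             * owns a NWd VM's buffer); it is reclaimed through
             * plat_ffa_acquire_receiver_rx() before reuse. */
            MAILBOX_STATE_OTHER_WORLD_OWNED,
    };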

Change-Id: I1f2353f97d5d46436a3f81b5abf4a032f43745e8
Signed-off-by: J-Alves <joao.alves@arm.com>
diff --git a/src/api.c b/src/api.c
index 9ceb743..3156fe7 100644
--- a/src/api.c
+++ b/src/api.c
@@ -362,6 +362,12 @@
 	uint32_t buffer_size;
 	struct ffa_value ret;
 
+	/* Acquire receiver's RX buffer. */
+	if (!plat_ffa_acquire_receiver_rx(vm_locked, &ret)) {
+		dlog_verbose("Failed to acquire RX buffer for VM %x\n", vm->id);
+		return ret;
+	}
+
 	if (vm_is_mailbox_busy(vm_locked)) {
 		/*
 		 * Can't retrieve memory information if the mailbox is not
@@ -371,12 +377,6 @@
 		return ffa_error(FFA_BUSY);
 	}
 
-	/* Acquire receiver's RX buffer. */
-	if (!plat_ffa_acquire_receiver_rx(vm_locked, &ret)) {
-		dlog_verbose("Failed to acquire RX buffer for VM %x\n", vm->id);
-		return ret;
-	}
-
 	if (version == MAKE_FFA_VERSION(1, 0)) {
 		struct ffa_partition_info_v1_0 *recv_mailbox = vm->mailbox.recv;
 
@@ -425,7 +425,7 @@
 	/* Sender is Hypervisor in the normal world (TEE in secure world). */
 	vm->mailbox.recv_sender = HF_VM_ID_BASE;
 	vm->mailbox.recv_func = FFA_PARTITION_INFO_GET_32;
-	vm->mailbox.state = MAILBOX_STATE_READ;
+	vm->mailbox.state = MAILBOX_STATE_FULL;
 
 	/*
 	 * Return the count of partition information descriptors in w2
@@ -863,10 +863,65 @@
 	}
 }
 
+/**
+ * Change the state of the mailbox to empty, such that ownership is handed
+ * back to the partition manager.
+ * Returns true if the mailbox was reset successfully, false otherwise.
+ */
+static bool api_release_mailbox(struct vm_locked vm_locked, int32_t *error_code)
+{
+	ffa_vm_id_t vm_id = vm_locked.vm->id;
+	int32_t error_code_to_ret = 0;
+	bool ret = false;
+
+	switch (vm_locked.vm->mailbox.state) {
+	case MAILBOX_STATE_EMPTY:
+		dlog_verbose("Mailbox of %x is empty.\n", vm_id);
+		error_code_to_ret = FFA_DENIED;
+		goto out;
+	case MAILBOX_STATE_FULL:
+		/* Check there are no pending RX-buffer-full notifications. */
+		if (vm_are_fwk_notifications_pending(vm_locked)) {
+			dlog_verbose(
+				"Mailbox of endpoint %x has pending "
+				"messages.\n",
+				vm_id);
+			error_code_to_ret = FFA_DENIED;
+			goto out;
+		}
+		break;
+	case MAILBOX_STATE_OTHER_WORLD_OWNED:
+		/*
+		 * The SPMC shouldn't let an SP's mailbox get into this state.
+		 * At the Hypervisor, a VM may call FFA_RX_RELEASE while its
+		 * mailbox is in this state; in that case, an error should be
+		 * reported.
+		 */
+		if (vm_id_is_current_world(vm_id)) {
+			dlog_verbose(
+				"Mailbox of endpoint %x in an invalid state.\n",
+				vm_id);
+			error_code_to_ret = FFA_ABORTED;
+			goto out;
+		}
+		break;
+	}
+
+	vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
+	ret = true;
+out:
+	if (error_code != NULL) {
+		*error_code = error_code_to_ret;
+	}
+
+	return ret;
+}
+
 struct ffa_value api_ffa_msg_wait(struct vcpu *current, struct vcpu **next,
 				  struct ffa_value *args)
 {
 	enum vcpu_state next_state = VCPU_STATE_WAITING;
+	struct ffa_value ret;
 
 	if (args->arg1 != 0U || args->arg2 != 0U || args->arg3 != 0U ||
 	    args->arg4 != 0U || args->arg5 != 0U || args->arg6 != 0U ||
@@ -882,7 +937,16 @@
 
 	assert(next_state == VCPU_STATE_WAITING);
 
-	return plat_ffa_msg_wait_prepare(current, next);
+	ret = plat_ffa_msg_wait_prepare(current, next);
+
+	if (ret.func != FFA_ERROR_32) {
+		struct vm_locked vm_locked = vm_lock(current->vm);
+
+		api_release_mailbox(vm_locked, NULL);
+		vm_unlock(&vm_locked);
+	}
+
+	return ret;
 }
 
 /**
@@ -1004,12 +1068,9 @@
 		 * A pending message allows the vCPU to run so the message can
 		 * be delivered directly.
 		 */
-		if (vcpu->vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
+		if (vcpu->vm->mailbox.state == MAILBOX_STATE_FULL) {
 			arch_regs_set_retval(&vcpu->regs,
 					     ffa_msg_recv_return(vcpu->vm));
-			if (vcpu->vm->mailbox.recv_func == FFA_MSG_SEND_32) {
-				vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
-			}
 			break;
 		}
 
@@ -1215,41 +1276,6 @@
 }
 
 /**
- * Determines the value to be returned by api_ffa_rxtx_map and
- * api_ffa_rx_release after they've succeeded. If a secondary VM is running and
- * there are waiters, it also switches back to the primary VM for it to wake
- * waiters up.
- */
-static struct ffa_value api_waiter_result(struct vm_locked locked_vm,
-					  struct vcpu *current,
-					  struct vcpu **next)
-{
-	struct vm *vm = locked_vm.vm;
-
-	CHECK(list_empty(&vm->mailbox.waiter_list));
-
-	if (list_empty(&vm->mailbox.waiter_list)) {
-		/* No waiters, nothing else to do. */
-		return (struct ffa_value){.func = FFA_SUCCESS_32};
-	}
-
-	if (vm->id == HF_PRIMARY_VM_ID) {
-		/* The caller is the primary VM. Tell it to wake up waiters. */
-		return (struct ffa_value){.func = FFA_RX_RELEASE_32};
-	}
-
-	/*
-	 * Switch back to the primary VM, informing it that there are waiters
-	 * that need to be notified.
-	 */
-	*next = api_switch_to_primary(
-		current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
-		VCPU_STATE_WAITING);
-
-	return (struct ffa_value){.func = FFA_SUCCESS_32};
-}
-
-/**
  * Configures the hypervisor's stage-1 view of the send and receive pages.
  */
 static bool api_vm_configure_stage1(struct mm_stage1_locked mm_stage1_locked,
@@ -1768,8 +1794,7 @@
 		goto out;
 	}
 
-	if (to->mailbox.state != MAILBOX_STATE_EMPTY ||
-	    to->mailbox.recv == NULL) {
+	if (vm_is_mailbox_busy(to_locked)) {
 		dlog_error(
 			"Cannot deliver message to VM %#x, RX buffer not "
 			"ready.\n",
@@ -1798,7 +1823,7 @@
 	to->mailbox.recv_size = msg_size;
 	to->mailbox.recv_sender = sender_id;
 	to->mailbox.recv_func = FFA_MSG_SEND2_32;
-	to->mailbox.state = MAILBOX_STATE_RECEIVED;
+	to->mailbox.state = MAILBOX_STATE_FULL;
 
 	rx_buffer_full = plat_ffa_is_vm_id(sender_id)
 				 ? FFA_NOTIFICATION_HYP_BUFFER_FULL_MASK
@@ -1886,10 +1911,10 @@
 	sl_lock(&vm->lock);
 
 	/* Return pending messages without blocking. */
-	if (vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
+	if (vm->mailbox.state == MAILBOX_STATE_FULL) {
 		return_code = ffa_msg_recv_return(vm);
 		if (return_code.func == FFA_MSG_SEND_32) {
-			vm->mailbox.state = MAILBOX_STATE_READ;
+			vm->mailbox.state = MAILBOX_STATE_EMPTY;
 		}
 		goto out;
 	}
@@ -2025,7 +2050,7 @@
  *    hf_mailbox_waiter_get.
  */
 struct ffa_value api_ffa_rx_release(ffa_vm_id_t receiver_id,
-				    struct vcpu *current, struct vcpu **next)
+				    struct vcpu *current)
 {
 	struct vm *current_vm = current->vm;
 	struct vm *vm;
@@ -2033,6 +2058,7 @@
 	ffa_vm_id_t current_vm_id = current_vm->id;
 	ffa_vm_id_t release_vm_id;
 	struct ffa_value ret;
+	int32_t error_code;
 
 	/* `receiver_id` can be set only at Non-Secure Physical interface. */
 	if (vm_id_is_current_world(current_vm_id) && (receiver_id != 0)) {
@@ -2058,49 +2084,16 @@
 		return ffa_error(FFA_INVALID_PARAMETERS);
 	}
 
-	if (!plat_ffa_rx_release_forward(vm_locked, &ret)) {
-		dlog_verbose("RX_RELEASE forward failed for VM ID %#x.\n",
-			     release_vm_id);
+	if (plat_ffa_rx_release_forward(vm_locked, &ret)) {
 		goto out;
 	}
 
-	/*
-	 * When SPMC owns a VM's RX buffer, the Hypervisor's view can be out of
-	 * sync: reset it to empty and exit.
-	 */
-	if (plat_ffa_rx_release_forwarded(vm_locked)) {
-		ret = (struct ffa_value){.func = FFA_SUCCESS_32};
+	if (!api_release_mailbox(vm_locked, &error_code)) {
+		ret = ffa_error(error_code);
 		goto out;
 	}
 
-	switch (vm->mailbox.state) {
-	case MAILBOX_STATE_EMPTY:
-		ret = ffa_error(FFA_DENIED);
-		break;
-
-	case MAILBOX_STATE_RECEIVED:
-		if (release_vm_id == current_vm_id) {
-			/*
-			 * VM requesting to release its own RX buffer,
-			 * must be in READ state.
-			 */
-			ret = ffa_error(FFA_DENIED);
-		} else {
-			/*
-			 * Forwarded message from Hypervisor to release a VM
-			 * RX buffer, SPMC's mailbox view can be still in
-			 * RECEIVED state.
-			 */
-			ret = (struct ffa_value){.func = FFA_SUCCESS_32};
-			vm->mailbox.state = MAILBOX_STATE_EMPTY;
-		}
-		break;
-
-	case MAILBOX_STATE_READ:
-		ret = api_waiter_result(vm_locked, current, next);
-		vm->mailbox.state = MAILBOX_STATE_EMPTY;
-		break;
-	}
+	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
 out:
 	vm_unlock(&vm_locked);
@@ -2152,7 +2145,7 @@
 		goto out;
 	}
 
-	receiver->mailbox.state = MAILBOX_STATE_RECEIVED;
+	receiver->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
 
 	ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 
@@ -3073,10 +3066,12 @@
 	 */
 	memcpy_s(retrieve_request, message_buffer_size, to_msg, length);
 
-	if (vm_is_mailbox_busy(to_locked)) {
+	if ((vm_is_mailbox_other_world_owned(to_locked) &&
+	     !plat_ffa_acquire_receiver_rx(to_locked, &ret)) ||
+	    vm_is_mailbox_busy(to_locked)) {
 		/*
-		 * Can't retrieve memory information if the mailbox is not
-		 * available.
+		 * Can't retrieve memory information if the mailbox is
+		 * not available.
 		 */
 		dlog_verbose("%s: RX buffer not ready.\n", __func__);
 		ret = ffa_error(FFA_BUSY);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index f38c571..ea149a8 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -562,7 +562,7 @@
 					 current->vm->ffa_version);
 		return true;
 	case FFA_RX_RELEASE_32:
-		*args = api_ffa_rx_release(ffa_receiver(*args), current, next);
+		*args = api_ffa_rx_release(ffa_receiver(*args), current);
 		return true;
 	case FFA_RXTX_MAP_64:
 		*args = api_ffa_rxtx_map(ipa_init(args->arg1),
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 49052ab..35db9f6 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -14,6 +14,7 @@
 #include "hf/arch/plat/ffa.h"
 
 #include "hf/api.h"
+#include "hf/check.h"
 #include "hf/dlog.h"
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
@@ -86,16 +87,13 @@
 	ffa_tee_enabled = tee_enabled;
 }
 
-static void plat_ffa_rxtx_map_spmc(paddr_t recv, paddr_t send,
-				   uint64_t page_count)
+static struct ffa_value plat_ffa_rxtx_map_spmc(paddr_t recv, paddr_t send,
+					       uint64_t page_count)
 {
-	struct ffa_value ret;
-
-	ret = arch_other_world_call((struct ffa_value){.func = FFA_RXTX_MAP_64,
-						       .arg1 = pa_addr(recv),
-						       .arg2 = pa_addr(send),
-						       .arg3 = page_count});
-	CHECK(ret.func == FFA_SUCCESS_32);
+	return arch_other_world_call((struct ffa_value){.func = FFA_RXTX_MAP_64,
+							.arg1 = pa_addr(recv),
+							.arg2 = pa_addr(send),
+							.arg3 = page_count});
 }
 
 void plat_ffa_init(struct mpool *ppool)
@@ -134,11 +132,13 @@
 	 * perspective and vice-versa.
 	 */
 	dlog_verbose("Setting up buffers for TEE.\n");
-	plat_ffa_rxtx_map_spmc(
+	ret = plat_ffa_rxtx_map_spmc(
 		pa_from_va(va_from_ptr(other_world_vm->mailbox.recv)),
 		pa_from_va(va_from_ptr(other_world_vm->mailbox.send)),
 		HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
 
+	CHECK(ret.func == FFA_SUCCESS_32);
+
 	ffa_tee_enabled = true;
 
 	dlog_verbose("TEE finished setting up buffers.\n");
@@ -271,7 +271,7 @@
 	ffa_vm_id_t vm_id = vm->id;
 
 	if (!ffa_tee_enabled || vm_does_not_support_indirect_messages(vm)) {
-		return true;
+		return false;
 	}
 
 	CHECK(vm_id_is_current_world(vm_id));
@@ -280,29 +280,18 @@
 	*ret = arch_other_world_call(
 		(struct ffa_value){.func = FFA_RX_RELEASE_32, .arg1 = vm_id});
 
-	return ret->func == FFA_SUCCESS_32;
-}
-
-/**
- * In FF-A v1.1 with SPMC enabled the SPMC owns the RX buffers for NWd VMs,
- * hence the SPMC is handling FFA_RX_RELEASE calls for NWd VMs too.
- * The Hypervisor's view of a VM's RX buffer can be out of sync, reset it to
- * 'empty' if the FFA_RX_RELEASE call has been successfully forwarded to the
- * SPMC.
- */
-bool plat_ffa_rx_release_forwarded(struct vm_locked vm_locked)
-{
-	struct vm *vm = vm_locked.vm;
-
-	if (ffa_tee_enabled && (vm->ffa_version > MAKE_FFA_VERSION(1, 0))) {
-		dlog_verbose(
-			"RX_RELEASE forwarded, reset MB state for VM ID %#x.\n",
-			vm->id);
-		vm->mailbox.state = MAILBOX_STATE_EMPTY;
-		return true;
+	if (ret->func == FFA_SUCCESS_32) {
+		/*
+		 * The SPMC owns the VM's RX buffer after a successful
+		 * FFA_RX_RELEASE call.
+		 */
+		vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
+	} else {
+		dlog_verbose("FFA_RX_RELEASE forwarded failed for VM ID %#x.\n",
+			     vm_locked.vm->id);
 	}
 
-	return false;
+	return true;
 }
 
 /**
@@ -311,17 +300,23 @@
  * VM RX/TX buffers must have been previously mapped in the SPM either
  * by forwarding VM's RX_TX_MAP API or another way if buffers were
  * declared in manifest.
+ *
+ * Returns true if ownership of the RX buffer lies with the hypervisor.
  */
 bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
 				  struct ffa_value *ret)
 {
 	struct ffa_value other_world_ret;
 
-	if (!ffa_tee_enabled) {
-		return true;
-	}
-
-	if (vm_does_not_support_indirect_messages(to_locked.vm)) {
+	/*
+	 * Do not forward the call if either:
+	 * - The TEE is not present.
+	 * - The VM's version is not FF-A v1.1.
+	 * - The mailbox ownership hasn't been transferred to the SPMC.
+	 */
+	if (!ffa_tee_enabled ||
+	    vm_does_not_support_indirect_messages(to_locked.vm) ||
+	    to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
 		return true;
 	}
 
@@ -332,7 +327,13 @@
 		*ret = other_world_ret;
 	}
 
-	return other_world_ret.func == FFA_SUCCESS_32;
+	if (other_world_ret.func != FFA_SUCCESS_32) {
+		return false;
+	}
+
+	to_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
+
+	return true;
 }
 
 bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
@@ -711,8 +712,10 @@
 {
 	struct vm *vm = vm_locked.vm;
 	struct vm *other_world;
+	struct ffa_value ret;
 
 	if (!ffa_tee_enabled) {
+		vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
 		return;
 	}
 
@@ -731,7 +734,17 @@
 		vm->id, (uintptr_t)vm->mailbox.recv,
 		(uintptr_t)vm->mailbox.send);
 
-	plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
+	ret = plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
+
+	if (ret.func != FFA_SUCCESS_32) {
+		panic("Fail to map RXTX buffers for VM %x, in the SPMC's "
+		      "translation regime\n",
+		      vm->id);
+	}
+
+	vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
+
+	dlog_verbose("Mailbox of %x owned by SPMC.\n", vm_locked.vm->id);
 }
 
 void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
@@ -1219,7 +1232,7 @@
 	other_world_locked.vm->mailbox.recv_size = fragment_length;
 	other_world_locked.vm->mailbox.recv_sender = sender_vm_id;
 	other_world_locked.vm->mailbox.recv_func = share_func;
-	other_world_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
+	other_world_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
 	ret = arch_other_world_call(
 		(struct ffa_value){.func = share_func,
 				   .arg1 = memory_share_length,
@@ -1449,13 +1462,12 @@
 		 */
 		primary_ret = ffa_msg_recv_return(to.vm);
 
-		to.vm->mailbox.state = MAILBOX_STATE_READ;
 		*next = api_switch_to_primary(current, primary_ret,
 					      VCPU_STATE_BLOCKED);
 		return ret;
 	}
 
-	to.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
+	to.vm->mailbox.state = MAILBOX_STATE_FULL;
 
 	/* Messages for the TEE are sent on via the dispatcher. */
 	if (to.vm->id == HF_TEE_VM_ID) {
@@ -1925,7 +1937,7 @@
 	to_locked.vm->mailbox.recv_size = length;
 	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
 	to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
-	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
+	to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
 
 out:
 	vm_unlock(&from_locked);
@@ -1948,7 +1960,7 @@
 	other_world_locked.vm->mailbox.recv_size = fragment_length;
 	other_world_locked.vm->mailbox.recv_sender = sender_vm_id;
 	other_world_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
-	other_world_locked.vm->mailbox.state = MAILBOX_STATE_RECEIVED;
+	other_world_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
 	ret = arch_other_world_call(
 		(struct ffa_value){.func = FFA_MEM_FRAG_TX_32,
 				   .arg1 = (uint32_t)handle,
@@ -2259,6 +2271,7 @@
 	to->mailbox.recv_size = size;
 	to->mailbox.recv_sender = sender_vm_id;
 	to->mailbox.recv_func = FFA_MSG_SEND_32;
+	to->mailbox.state = MAILBOX_STATE_FULL;
 	ret = deliver_msg(to_locked, sender_vm_id, current, next);
 
 out:
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index d027dfe..7c87f6d 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -597,7 +597,7 @@
 	(void)vm_locked;
 	(void)ret;
 
-	return true;
+	return false;
 }
 
 bool plat_ffa_rx_release_forwarded(struct vm_locked vm_locked)
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index 3fa07a9..ee64f54 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -2221,7 +2221,7 @@
 	to_locked.vm->mailbox.recv_size = fragment_length;
 	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
 	to_locked.vm->mailbox.recv_func = FFA_MEM_RETRIEVE_RESP_32;
-	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
+	to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
 
 	if (is_send_complete) {
 		ffa_memory_retrieve_complete(share_states, share_state,
@@ -2382,7 +2382,7 @@
 	to_locked.vm->mailbox.recv_size = fragment_length;
 	to_locked.vm->mailbox.recv_sender = HF_HYPERVISOR_VM_ID;
 	to_locked.vm->mailbox.recv_func = FFA_MEM_FRAG_TX_32;
-	to_locked.vm->mailbox.state = MAILBOX_STATE_READ;
+	to_locked.vm->mailbox.state = MAILBOX_STATE_FULL;
 
 	if (!continue_ffa_hyp_mem_retrieve_req) {
 		share_state->retrieved_fragment_count[receiver_index]++;
diff --git a/src/vm.c b/src/vm.c
index 2b4a3a7..128d850 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -252,6 +252,14 @@
 }
 
 /**
+ * Checks whether the mailbox is currently owned by the other world.
+ */
+bool vm_is_mailbox_other_world_owned(struct vm_locked to)
+{
+	return to.vm->mailbox.state == MAILBOX_STATE_OTHER_WORLD_OWNED;
+}
+
+/**
  * Gets the ID of the VM which the given VM's wait entry is for.
  */
 ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
@@ -512,7 +520,16 @@
 {
 	return vm_get_notifications(vm_locked, true)->global.pending != 0ULL ||
 	       vm_get_notifications(vm_locked, false)->global.pending != 0ULL ||
-	       vm_locked.vm->notifications.framework.pending != 0ULL;
+	       vm_are_fwk_notifications_pending(vm_locked);
+}
+
+/**
+ * Currently the RX-buffer-full notification is the only supported framework
+ * notification. Returns true if one is pending, from the Hypervisor or SPMC.
+ */
+bool vm_are_fwk_notifications_pending(struct vm_locked vm_locked)
+{
+	return vm_locked.vm->notifications.framework.pending != 0ULL;
 }
 
 /**
@@ -815,24 +832,12 @@
 {
 	struct vm *vm = vm_locked.vm;
 	ffa_notifications_bitmap_t framework;
-	bool rx_buffer_full;
 
 	assert(vm != NULL);
 
 	framework = vm_notifications_state_get_pending(
 		&vm->notifications.framework);
 
-	/*
-	 * By retrieving an RX buffer full notification the buffer state
-	 * transitions from RECEIVED to READ; the VM is now the RX buffer
-	 * owner, can read it and is allowed to release it.
-	 */
-	rx_buffer_full = is_ffa_spm_buffer_full_notification(framework) ||
-			 is_ffa_hyp_buffer_full_notification(framework);
-	if (rx_buffer_full && vm->mailbox.state == MAILBOX_STATE_RECEIVED) {
-		vm->mailbox.state = MAILBOX_STATE_READ;
-	}
-
 	return framework;
 }