refactor(ff-a): replace `plat_ffa_` prefix on public functions

Replace the `plat_ffa_` prefix with an `ffa_{file_name}_` prefix. Only
public functions are renamed, to avoid unnecessary churn (private
functions are renamed in the next commit). As part of the rename,
helpers that were duplicated across the SPMC's `cpu_cycles.c`,
`direct_messaging.c` and `interrupts.c` are deduplicated into single
public functions.
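
For example:

  plat_ffa_log_init              -> ffa_init_log
  plat_ffa_is_spmd_lp_id         -> ffa_direct_msg_is_spmd_lp_id
  plat_ffa_error_32              -> ffa_cpu_cycles_error_32
  plat_ffa_vcpu_queue_interrupts -> ffa_interrupts_mask
  plat_ffa_vcpu_allow_interrupts -> ffa_interrupts_unmask
  is_predecessor_in_call_chain   -> ffa_direct_msg_precedes_in_call_chain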

Change-Id: I847c218f134a2519a45a3af1d6c4b953db9c1cbb
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/api.c b/src/api.c
index a5ddbb7..47df0ab 100644
--- a/src/api.c
+++ b/src/api.c
@@ -2905,7 +2905,7 @@
 	}
 
 	if (ffa_is_framework_msg(args) &&
-	    plat_ffa_handle_framework_msg(args, &ret)) {
+	    ffa_direct_msg_handle_framework_msg(args, &ret)) {
 		return ret;
 	}
 
@@ -3080,7 +3080,7 @@
 				       struct ffa_value to_ret,
 				       bool is_nwd_call_chain)
 {
-	if (plat_ffa_is_spmd_lp_id(receiver_vm_id) ||
+	if (ffa_direct_msg_is_spmd_lp_id(receiver_vm_id) ||
 	    !vm_id_is_current_world(receiver_vm_id)) {
 		*next = api_switch_to_other_world(current_locked, to_ret,
 						  VCPU_STATE_WAITING);
@@ -4518,12 +4518,12 @@
 
 	/*
 	 * This check assumes receiver is the current VM, and has been enforced
-	 * by `plat_ffa_is_notifications_get_valid`.
+	 * by `ffa_notifications_is_get_valid`.
 	 */
 	receiver_locked = ffa_vm_find_locked(receiver_vm_id);
 
 	/*
-	 * `plat_ffa_is_notifications_get_valid` ensures following is never
+	 * `ffa_notifications_is_get_valid` ensures following is never
 	 * true.
 	 */
 	CHECK(receiver_locked.vm != NULL);
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 5e32a1e..2328d41 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -731,7 +731,7 @@
 		*args = api_ffa_console_log(*args, current);
 		return true;
 	case FFA_ERROR_32:
-		*args = plat_ffa_error_32(current, next, args->arg2);
+		*args = ffa_cpu_cycles_error_32(current, next, args->arg2);
 		return true;
 
 	default:
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index e7b9234..ea4fbd6 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -18,7 +18,7 @@
 	return HF_SPMC_VM_ID;
 }
 
-void plat_ffa_log_init(void)
+void ffa_init_log(void)
 {
 }
 
@@ -449,7 +449,7 @@
 	(void)next_locked;
 }
 
-bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
+bool ffa_direct_msg_is_spmd_lp_id(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return false;
@@ -604,8 +604,9 @@
 	return attributes;
 }
 
-struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
-				   enum ffa_error error_code)
+struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
+					 struct vcpu **next,
+					 enum ffa_error error_code)
 {
 	(void)current;
 	(void)next;
@@ -647,7 +648,8 @@
 	return 0;
 }
 
-bool plat_ffa_handle_framework_msg(struct ffa_value args, struct ffa_value *ret)
+bool ffa_direct_msg_handle_framework_msg(struct ffa_value args,
+					 struct ffa_value *ret)
 {
 	(void)args;
 	(void)ret;
diff --git a/src/ffa/absent.c b/src/ffa/absent.c
index 0cd9e1d..79090fc 100644
--- a/src/ffa/absent.c
+++ b/src/ffa/absent.c
@@ -6,6 +6,7 @@
  * https://opensource.org/licenses/BSD-3-Clause.
  */
 
+#include "hf/api.h"
 #include "hf/ffa.h"
 #include "hf/ffa_internal.h"
 #include "hf/manifest.h"
@@ -26,7 +27,7 @@
 	return 0;
 }
 
-void plat_ffa_log_init(void)
+void ffa_init_log(void)
 {
 }
 
@@ -35,7 +36,7 @@
 	(void)tee_enabled;
 }
 
-void plat_ffa_init(struct mpool *ppool)
+void ffa_init(struct mpool *ppool)
 {
 	(void)ppool;
 }
@@ -463,7 +464,7 @@
 	(void)sender_vm_id;
 }
 
-bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
+bool ffa_direct_msg_is_spmd_lp_id(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return false;
@@ -582,7 +583,8 @@
 	return api_interrupt_get(current_locked);
 }
 
-bool plat_ffa_handle_framework_msg(struct ffa_value args, struct ffa_value *ret)
+bool ffa_direct_msg_handle_framework_msg(struct ffa_value args,
+					 struct ffa_value *ret)
 {
 	(void)args;
 	(void)ret;
diff --git a/src/ffa/hypervisor/cpu_cycles.c b/src/ffa/hypervisor/cpu_cycles.c
index 91aa1ce..f2a9251 100644
--- a/src/ffa/hypervisor/cpu_cycles.c
+++ b/src/ffa/hypervisor/cpu_cycles.c
@@ -127,8 +127,9 @@
 	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
-struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
-				   enum ffa_error error_code)
+struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
+					 struct vcpu **next,
+					 enum ffa_error error_code)
 {
 	(void)current;
 	(void)next;
diff --git a/src/ffa/hypervisor/direct_messaging.c b/src/ffa/hypervisor/direct_messaging.c
index 2555545..7aeffe4 100644
--- a/src/ffa/hypervisor/direct_messaging.c
+++ b/src/ffa/hypervisor/direct_messaging.c
@@ -69,7 +69,7 @@
 					   struct ffa_value args,
 					   struct ffa_value *ret)
 {
-	if (!plat_ffa_is_tee_enabled()) {
+	if (!ffa_init_is_tee_enabled()) {
 		dlog_verbose("Not forwarding: ffa_tee_enabled is false\n");
 		return false;
 	}
@@ -119,7 +119,8 @@
 	(void)next_locked;
 }
 
-bool plat_ffa_handle_framework_msg(struct ffa_value args, struct ffa_value *ret)
+bool ffa_direct_msg_handle_framework_msg(struct ffa_value args,
+					 struct ffa_value *ret)
 {
 	(void)args;
 	(void)ret;
@@ -127,7 +128,7 @@
 	return false;
 }
 
-bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
+bool ffa_direct_msg_is_spmd_lp_id(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return false;
diff --git a/src/ffa/hypervisor/ffa_memory.c b/src/ffa/hypervisor/ffa_memory.c
index f41378b..0803f63 100644
--- a/src/ffa/hypervisor/ffa_memory.c
+++ b/src/ffa/hypervisor/ffa_memory.c
@@ -425,7 +425,7 @@
 	struct vm *from = vm_find(HF_TEE_VM_ID);
 	struct two_vm_locked vm_to_from_lock;
 
-	if (!plat_ffa_is_tee_enabled()) {
+	if (!ffa_init_is_tee_enabled()) {
 		dlog_verbose("Invalid handle %#lx for FFA_MEM_RECLAIM.\n",
 			     handle);
 		return ffa_error(FFA_INVALID_PARAMETERS);
diff --git a/src/ffa/hypervisor/init.c b/src/ffa/hypervisor/init.c
index 0b53d6a..b87ca54 100644
--- a/src/ffa/hypervisor/init.c
+++ b/src/ffa/hypervisor/init.c
@@ -17,22 +17,22 @@
 
 static bool ffa_tee_enabled = false;
 
-bool plat_ffa_is_tee_enabled(void)
+bool ffa_init_is_tee_enabled(void)
 {
 	return ffa_tee_enabled;
 }
 
-void plat_ffa_set_tee_enabled(bool tee_enabled)
+void ffa_init_set_tee_enabled(bool tee_enabled)
 {
 	ffa_tee_enabled = tee_enabled;
 }
 
-void plat_ffa_log_init(void)
+void ffa_init_log(void)
 {
 	dlog_info("Initializing Hafnium (Hypervisor)\n");
 }
 
-void plat_ffa_init(struct mpool *ppool)
+void ffa_init(struct mpool *ppool)
 {
 	struct vm *other_world_vm = vm_find(HF_OTHER_WORLD_ID);
 	struct ffa_value ret;
@@ -51,7 +51,7 @@
 
 	(void)ppool;
 
-	if (!plat_ffa_is_tee_enabled()) {
+	if (!ffa_init_is_tee_enabled()) {
 		return;
 	}
 
@@ -95,7 +95,7 @@
 		pa_from_va(va_from_ptr(other_world_vm->mailbox.send)),
 		HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
 
-	plat_ffa_set_tee_enabled(true);
+	ffa_init_set_tee_enabled(true);
 
 	/*
 	 * Hypervisor will write to secure world receive buffer, and will read
diff --git a/src/ffa/hypervisor/notifications.c b/src/ffa/hypervisor/notifications.c
index 82a42a8..e366241 100644
--- a/src/ffa/hypervisor/notifications.c
+++ b/src/ffa/hypervisor/notifications.c
@@ -132,7 +132,7 @@
 {
 	struct ffa_value ret;
 
-	if (plat_ffa_is_tee_enabled()) {
+	if (ffa_init_is_tee_enabled()) {
 		ret = arch_other_world_call((struct ffa_value){
 			.func = FFA_NOTIFICATION_BITMAP_CREATE_32,
 			.arg1 = vm_id,
@@ -261,7 +261,7 @@
 	assert(from_fwk != NULL);
 
 	/* Get SPMC notifications. */
-	if (plat_ffa_is_tee_enabled()) {
+	if (ffa_init_is_tee_enabled()) {
 		ret = arch_other_world_call((struct ffa_value){
 			.func = FFA_NOTIFICATION_GET_32,
 			.arg1 = (vcpu_id << 16) | receiver_id,
diff --git a/src/ffa/hypervisor/setup_and_discovery.c b/src/ffa/hypervisor/setup_and_discovery.c
index 8c7c8e4..07f1a57 100644
--- a/src/ffa/hypervisor/setup_and_discovery.c
+++ b/src/ffa/hypervisor/setup_and_discovery.c
@@ -20,7 +20,7 @@
 
 struct ffa_value ffa_setup_spmc_id_get(void)
 {
-	if (plat_ffa_is_tee_enabled()) {
+	if (ffa_init_is_tee_enabled()) {
 		/*
 		 * Fetch the SPMC ID from the SPMD using FFA_SPM_ID_GET.
 		 * DEN0077A FF-A v1.1 Beta0 section 13.9.2
@@ -60,7 +60,7 @@
 	struct vm *vm = vm_locked.vm;
 	struct vm *other_world;
 
-	if (!plat_ffa_is_tee_enabled()) {
+	if (!ffa_init_is_tee_enabled()) {
 		vm_locked.vm->mailbox.state = MAILBOX_STATE_EMPTY;
 		return;
 	}
@@ -97,7 +97,7 @@
 
 	id = vm_locked.vm->id;
 
-	if (!plat_ffa_is_tee_enabled()) {
+	if (!ffa_init_is_tee_enabled()) {
 		return;
 	}
 
@@ -127,7 +127,7 @@
 	 * Allow forwarding from the Hypervisor if TEE or SPMC exists and
 	 * declared as such in the Hypervisor manifest.
 	 */
-	return plat_ffa_is_tee_enabled();
+	return ffa_init_is_tee_enabled();
 }
 
 /*
@@ -150,7 +150,7 @@
 	 * Allow forwarding from the Hypervisor if TEE or SPMC exists and
 	 * declared as such in the Hypervisor manifest.
 	 */
-	if (!plat_ffa_is_tee_enabled()) {
+	if (!ffa_init_is_tee_enabled()) {
 		return vm_count;
 	}
 
@@ -249,7 +249,7 @@
 	struct vm *vm = vm_locked.vm;
 	ffa_id_t vm_id = vm->id;
 
-	if (!plat_ffa_is_tee_enabled() ||
+	if (!ffa_init_is_tee_enabled() ||
 	    !ffa_vm_supports_indirect_messages(vm)) {
 		return false;
 	}
@@ -294,7 +294,7 @@
 	 * - The VM's version is not FF-A v1.1.
 	 * - If the mailbox ownership hasn't been transferred to the SPMC.
 	 */
-	if (!plat_ffa_is_tee_enabled() ||
+	if (!ffa_init_is_tee_enabled() ||
 	    !ffa_vm_supports_indirect_messages(to_locked.vm) ||
 	    to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
 		return true;
diff --git a/src/ffa/spmc/cpu_cycles.c b/src/ffa/spmc/cpu_cycles.c
index dafef21..16508b1 100644
--- a/src/ffa/spmc/cpu_cycles.c
+++ b/src/ffa/spmc/cpu_cycles.c
@@ -11,14 +11,13 @@
 #include "hf/api.h"
 #include "hf/check.h"
 #include "hf/ffa.h"
+#include "hf/ffa/direct_messaging.h"
 #include "hf/ffa/interrupts.h"
 #include "hf/ffa/vm.h"
 #include "hf/ffa_internal.h"
 #include "hf/plat/interrupts.h"
 #include "hf/vm.h"
 
-void plat_ffa_vcpu_allow_interrupts(struct vcpu *current);
-
 bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
 				struct ffa_value *ret)
 {
@@ -250,7 +249,7 @@
 	vcpu_unlock(&target_locked);
 
 	/* Restore interrupt priority mask. */
-	plat_ffa_vcpu_allow_interrupts(current);
+	ffa_interrupts_unmask(current);
 
 	/* The pre-empted vCPU should be run. */
 	*next = target_vcpu;
@@ -424,7 +423,7 @@
 		 * allow the interrupts(if they were masked earlier) before
 		 * returning control to NWd.
 		 */
-		plat_ffa_vcpu_allow_interrupts(current);
+		ffa_interrupts_unmask(current);
 		break;
 	case RTM_FFA_RUN:
 		ret = ffa_msg_wait_complete(current_locked, next);
@@ -438,7 +437,7 @@
 		 * allow the interrupts(if they were masked earlier) before
 		 * returning control to NWd.
 		 */
-		plat_ffa_vcpu_allow_interrupts(current);
+		ffa_interrupts_unmask(current);
 
 		break;
 	default:
@@ -452,43 +451,6 @@
 	return ret;
 }
 
-/**
- * Enforce action of an SP in response to non-secure or other-secure interrupt
- * by changing the priority mask. Effectively, physical interrupts shall not
- * trigger which has the same effect as queueing interrupts.
- */
-static void plat_ffa_vcpu_queue_interrupts(
-	struct vcpu_locked receiver_vcpu_locked)
-{
-	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
-	uint8_t current_priority;
-
-	/* Save current value of priority mask. */
-	current_priority = plat_interrupts_get_priority_mask();
-	receiver_vcpu->prev_interrupt_priority = current_priority;
-
-	if (receiver_vcpu->vm->other_s_interrupts_action ==
-		    OTHER_S_INT_ACTION_QUEUED ||
-	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
-		/*
-		 * If secure interrupts not masked yet, mask them now. We could
-		 * enter SPMC scheduled mode when an EL3 SPMD Logical partition
-		 * sends a direct request, and we are making the IMPDEF choice
-		 * to mask interrupts when such a situation occurs. This keeps
-		 * design simple.
-		 */
-		if (current_priority > SWD_MASK_ALL_INT) {
-			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
-		}
-	} else if (receiver_vcpu->vm->ns_interrupts_action ==
-		   NS_ACTION_QUEUED) {
-		/* If non secure interrupts not masked yet, mask them now. */
-		if (current_priority > SWD_MASK_NS_INT) {
-			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
-		}
-	}
-}
-
 /*
  * Initialize the scheduling mode and/or Partition Runtime model of the target
  * SP upon being resumed by an FFA_RUN ABI.
@@ -521,7 +483,7 @@
 		      vcpu->state == VCPU_STATE_BLOCKED);
 	}
 
-	plat_ffa_vcpu_queue_interrupts(target_locked);
+	ffa_interrupts_mask(target_locked);
 }
 
 /*
@@ -592,37 +554,12 @@
 	 * masked earlier).
 	 */
 	if (*next != NULL) {
-		plat_ffa_vcpu_allow_interrupts(current);
+		ffa_interrupts_unmask(current);
 	}
 
 	return ret_args;
 }
 
-static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
-					 struct vcpu_locked target_locked)
-{
-	struct vcpu *prev_node;
-	struct vcpu *current = current_locked.vcpu;
-	struct vcpu *target = target_locked.vcpu;
-
-	assert(current != NULL);
-	assert(target != NULL);
-
-	prev_node = current->call_chain.prev_node;
-
-	while (prev_node != NULL) {
-		if (prev_node == target) {
-			return true;
-		}
-
-		/* The target vCPU is not it's immediate predecessor. */
-		prev_node = prev_node->call_chain.prev_node;
-	}
-
-	/* Search terminated. Reached start of call chain. */
-	return false;
-}
-
 /**
  * Validates the Runtime model for FFA_RUN. Refer to section 7.2 of the FF-A
  * v1.1 EAC0 spec.
@@ -639,7 +576,8 @@
 		/* Fall through. */
 	case FFA_RUN_32: {
 		/* Rules 1,2 section 7.2 EAC0 spec. */
-		if (is_predecessor_in_call_chain(current_locked, locked_vcpu)) {
+		if (ffa_direct_msg_precedes_in_call_chain(current_locked,
+							  locked_vcpu)) {
 			return false;
 		}
 		*next_state = VCPU_STATE_BLOCKED;
@@ -681,7 +619,8 @@
 		/* Fall through. */
 	case FFA_RUN_32: {
 		/* Rules 1,2. */
-		if (is_predecessor_in_call_chain(current_locked, locked_vcpu)) {
+		if (ffa_direct_msg_precedes_in_call_chain(current_locked,
+							  locked_vcpu)) {
 			return false;
 		}
 
@@ -864,8 +803,9 @@
  * in RTM_SP_INIT runtime model, not implemented. Refer to section 8.5
  * of FF-A 1.2 spec.
  */
-struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
-				   enum ffa_error error_code)
+struct ffa_value ffa_cpu_cycles_error_32(struct vcpu *current,
+					 struct vcpu **next,
+					 enum ffa_error error_code)
 {
 	struct vcpu_locked current_locked;
 	struct vm_locked vm_locked;
diff --git a/src/ffa/spmc/direct_messaging.c b/src/ffa/spmc/direct_messaging.c
index 79473bc..183dbe4 100644
--- a/src/ffa/spmc/direct_messaging.c
+++ b/src/ffa/spmc/direct_messaging.c
@@ -11,6 +11,7 @@
 #include "hf/arch/gicv3.h"
 
 #include "hf/bits.h"
+#include "hf/ffa/interrupts.h"
 #include "hf/ffa_internal.h"
 #include "hf/plat/interrupts.h"
 
@@ -30,7 +31,7 @@
 	       vm_id_is_current_world(receiver_vm_id) &&
 	       (sender_vm_id == current_vm_id ||
 		(current_vm_id == HF_HYPERVISOR_VM_ID &&
-		 (plat_ffa_is_spmd_lp_id(sender_vm_id) ||
+		 (ffa_direct_msg_is_spmd_lp_id(sender_vm_id) ||
 		  !vm_id_is_current_world(sender_vm_id))));
 }
 
@@ -125,15 +126,6 @@
 	return false;
 }
 
-/**
- * If the interrupts were indeed masked by SPMC before an SP's vCPU was resumed,
- * restore the priority mask thereby allowing the interrupts to be delivered.
- */
-void plat_ffa_vcpu_allow_interrupts(struct vcpu *current)
-{
-	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
-}
-
 /*
  * Unwind the present call chain upon the invocation of
  * FFA_MSG_SEND_DIRECT_RESP ABI. The function also returns
@@ -155,7 +147,7 @@
 	current->rt_model = RTM_NONE;
 
 	/* Allow interrupts if they were masked earlier. */
-	plat_ffa_vcpu_allow_interrupts(current);
+	ffa_interrupts_unmask(current);
 
 	if (!vm_id_is_current_world(receiver_vm_id)) {
 		/* End of NWd scheduled call chain. */
@@ -166,43 +158,6 @@
 	}
 }
 
-/**
- * Enforce action of an SP in response to non-secure or other-secure interrupt
- * by changing the priority mask. Effectively, physical interrupts shall not
- * trigger which has the same effect as queueing interrupts.
- */
-static void plat_ffa_vcpu_queue_interrupts(
-	struct vcpu_locked receiver_vcpu_locked)
-{
-	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
-	uint8_t current_priority;
-
-	/* Save current value of priority mask. */
-	current_priority = plat_interrupts_get_priority_mask();
-	receiver_vcpu->prev_interrupt_priority = current_priority;
-
-	if (receiver_vcpu->vm->other_s_interrupts_action ==
-		    OTHER_S_INT_ACTION_QUEUED ||
-	    receiver_vcpu->scheduling_mode == SPMC_MODE) {
-		/*
-		 * If secure interrupts not masked yet, mask them now. We could
-		 * enter SPMC scheduled mode when an EL3 SPMD Logical partition
-		 * sends a direct request, and we are making the IMPDEF choice
-		 * to mask interrupts when such a situation occurs. This keeps
-		 * design simple.
-		 */
-		if (current_priority > SWD_MASK_ALL_INT) {
-			plat_interrupts_set_priority_mask(SWD_MASK_ALL_INT);
-		}
-	} else if (receiver_vcpu->vm->ns_interrupts_action ==
-		   NS_ACTION_QUEUED) {
-		/* If non secure interrupts not masked yet, mask them now. */
-		if (current_priority > SWD_MASK_NS_INT) {
-			plat_interrupts_set_priority_mask(SWD_MASK_NS_INT);
-		}
-	}
-}
-
 /*
  * Start winding the call chain or continue to wind the present one upon the
  * invocation of FFA_MSG_SEND_DIRECT_REQ or FFA_MSG_SEND_DIRECT_REQ2 (FF-A v1.2)
@@ -225,14 +180,39 @@
 	if (!vm_id_is_current_world(sender_vm_id)) {
 		/* Start of NWd scheduled call chain. */
 		receiver_vcpu->scheduling_mode = NWD_MODE;
-	} else if (plat_ffa_is_spmd_lp_id(sender_vm_id)) {
+	} else if (ffa_direct_msg_is_spmd_lp_id(sender_vm_id)) {
 		receiver_vcpu->scheduling_mode = SPMC_MODE;
 	} else {
 		/* Adding a new node to an existing call chain. */
 		vcpu_call_chain_extend(current_locked, receiver_vcpu_locked);
 		receiver_vcpu->scheduling_mode = current->scheduling_mode;
 	}
-	plat_ffa_vcpu_queue_interrupts(receiver_vcpu_locked);
+	ffa_interrupts_mask(receiver_vcpu_locked);
+}
+
+bool ffa_direct_msg_precedes_in_call_chain(struct vcpu_locked current_locked,
+					   struct vcpu_locked target_locked)
+{
+	struct vcpu *prev_node;
+	struct vcpu *current = current_locked.vcpu;
+	struct vcpu *target = target_locked.vcpu;
+
+	assert(current != NULL);
+	assert(target != NULL);
+
+	prev_node = current->call_chain.prev_node;
+
+	while (prev_node != NULL) {
+		if (prev_node == target) {
+			return true;
+		}
+
+		/* The target vCPU is not its immediate predecessor. */
+		prev_node = prev_node->call_chain.prev_node;
+	}
+
+	/* Search terminated. Reached start of call chain. */
+	return false;
 }
 
 /**
@@ -310,7 +290,8 @@
  * Handle framework messages: in particular, check VM availability messages are
  * valid.
  */
-bool plat_ffa_handle_framework_msg(struct ffa_value args, struct ffa_value *ret)
+bool ffa_direct_msg_handle_framework_msg(struct ffa_value args,
+					 struct ffa_value *ret)
 {
 	enum ffa_framework_msg_func func = ffa_framework_msg_func(args);
 
@@ -329,7 +310,7 @@
 	return false;
 }
 
-bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
+bool ffa_direct_msg_is_spmd_lp_id(ffa_id_t vm_id)
 {
 	return (vm_id >= EL3_SPMD_LP_ID_START && vm_id <= EL3_SPMD_LP_ID_END);
 }
diff --git a/src/ffa/spmc/init.c b/src/ffa/spmc/init.c
index c32bbaf..31117e2 100644
--- a/src/ffa/spmc/init.c
+++ b/src/ffa/spmc/init.c
@@ -12,18 +12,18 @@
 #include "hf/ffa/vm.h"
 #include "hf/mpool.h"
 
-void plat_ffa_log_init(void)
+void ffa_init_log(void)
 {
 	dlog_info("Initializing Hafnium (SPMC)\n");
 }
 
-void plat_ffa_init(struct mpool *ppool)
+void ffa_init(struct mpool *ppool)
 {
 	arch_ffa_init();
 	ffa_vm_init(ppool);
 }
 
-void plat_ffa_set_tee_enabled(bool tee_enabled)
+void ffa_init_set_tee_enabled(bool tee_enabled)
 {
 	(void)tee_enabled;
 }
diff --git a/src/ffa/spmc/interrupts.c b/src/ffa/spmc/interrupts.c
index 4983e25..555f5bf 100644
--- a/src/ffa/spmc/interrupts.c
+++ b/src/ffa/spmc/interrupts.c
@@ -13,6 +13,7 @@
 
 #include "hf/api.h"
 #include "hf/check.h"
+#include "hf/ffa/direct_messaging.h"
 #include "hf/ffa/vm.h"
 #include "hf/hf_ipi.h"
 #include "hf/vm.h"
@@ -161,12 +162,20 @@
 }
 
 /**
+ * If the interrupts were indeed masked by SPMC before an SP's vCPU was resumed,
+ * restore the priority mask thereby allowing the interrupts to be delivered.
+ */
+void ffa_interrupts_unmask(struct vcpu *current)
+{
+	plat_interrupts_set_priority_mask(current->prev_interrupt_priority);
+}
+
+/**
  * Enforce action of an SP in response to non-secure or other-secure interrupt
  * by changing the priority mask. Effectively, physical interrupts shall not
  * trigger which has the same effect as queueing interrupts.
  */
-static void plat_ffa_vcpu_queue_interrupts(
-	struct vcpu_locked receiver_vcpu_locked)
+void ffa_interrupts_mask(struct vcpu_locked receiver_vcpu_locked)
 {
 	struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
 	uint8_t current_priority;
@@ -219,7 +228,7 @@
 			     target_vcpu->vm->id);
 
 		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
-		plat_ffa_vcpu_queue_interrupts(target_vcpu_locked);
+		ffa_interrupts_mask(target_vcpu_locked);
 
 		vcpu_set_running(target_vcpu_locked, &ret_interrupt);
 
@@ -262,31 +271,6 @@
 	return next;
 }
 
-static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
-					 struct vcpu_locked target_locked)
-{
-	struct vcpu *prev_node;
-	struct vcpu *current = current_locked.vcpu;
-	struct vcpu *target = target_locked.vcpu;
-
-	assert(current != NULL);
-	assert(target != NULL);
-
-	prev_node = current->call_chain.prev_node;
-
-	while (prev_node != NULL) {
-		if (prev_node == target) {
-			return true;
-		}
-
-		/* The target vCPU is not it's immediate predecessor. */
-		prev_node = prev_node->call_chain.prev_node;
-	}
-
-	/* Search terminated. Reached start of call chain. */
-	return false;
-}
-
 /**
  * Handles the secure interrupt according to the target vCPU's state
  * in the case the owner of the interrupt is an S-EL1 partition.
@@ -307,7 +291,7 @@
 
 		/* FF-A v1.1 EAC0 Table 8.2 case 1 and Table 12.10. */
 		vcpu_enter_secure_interrupt_rtm(target_vcpu_locked);
-		plat_ffa_vcpu_queue_interrupts(target_vcpu_locked);
+		ffa_interrupts_mask(target_vcpu_locked);
 
 		/*
 		 * Ideally, we have to mask non-secure interrupts here
@@ -344,8 +328,8 @@
 			next = NULL;
 			plat_ffa_queue_vint(target_vcpu_locked, v_intid,
 					    (struct vcpu_locked){.vcpu = NULL});
-		} else if (is_predecessor_in_call_chain(current_locked,
-							target_vcpu_locked)) {
+		} else if (ffa_direct_msg_precedes_in_call_chain(
+				   current_locked, target_vcpu_locked)) {
 			struct ffa_value ret_interrupt =
 				api_ffa_interrupt_return(0);
 
diff --git a/src/ffa/spmc/notifications.c b/src/ffa/spmc/notifications.c
index a055e5e..f0eed96 100644
--- a/src/ffa/spmc/notifications.c
+++ b/src/ffa/spmc/notifications.c
@@ -58,8 +58,8 @@
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
-	if (plat_ffa_is_spmd_lp_id(sender_id) ||
-	    plat_ffa_is_spmd_lp_id(receiver_id)) {
+	if (ffa_direct_msg_is_spmd_lp_id(sender_id) ||
+	    ffa_direct_msg_is_spmd_lp_id(receiver_id)) {
 		dlog_verbose(
 			"Notification bind: not permitted for logical SPs (%x "
 			"%x).\n",
@@ -128,8 +128,8 @@
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
-	if (plat_ffa_is_spmd_lp_id(sender_id) ||
-	    plat_ffa_is_spmd_lp_id(receiver_id)) {
+	if (ffa_direct_msg_is_spmd_lp_id(sender_id) ||
+	    ffa_direct_msg_is_spmd_lp_id(receiver_id)) {
 		dlog_verbose(
 			"Notification set: not permitted for logical SPs (%x "
 			"%x).\n",
@@ -195,7 +195,7 @@
 	 *  notifications target to a VM.
 	 */
 	bool caller_and_receiver_valid =
-		(!plat_ffa_is_spmd_lp_id(receiver_id) &&
+		(!ffa_direct_msg_is_spmd_lp_id(receiver_id) &&
 		 (current_vm_id == receiver_id)) ||
 		(current_vm_id == HF_HYPERVISOR_VM_ID &&
 		 !vm_id_is_current_world(receiver_id));
diff --git a/src/init.c b/src/init.c
index e4362bf..ddd90d5 100644
--- a/src/init.c
+++ b/src/init.c
@@ -47,7 +47,7 @@
 	/* Make sure the console is initialised before calling dlog. */
 	plat_console_init();
 
-	plat_ffa_log_init();
+	ffa_init_log();
 
 	mpool_init(&ppool, MM_PPOOL_ENTRY_SIZE);
 	mpool_add_chunk(&ppool, ptable_buf, sizeof(ptable_buf));
@@ -150,7 +150,7 @@
 		      manifest_strerror(manifest_ret));
 	}
 
-	plat_ffa_set_tee_enabled(manifest->ffa_tee_enabled);
+	ffa_init_set_tee_enabled(manifest->ffa_tee_enabled);
 
 	if (!plat_iommu_init(&fdt, mm_stage1_locked, &ppool)) {
 		panic("Could not initialize IOMMUs.");
@@ -191,7 +191,7 @@
 	mm_vm_enable_invalidation();
 
 	/* Perform platform specfic FF-A initialization. */
-	plat_ffa_init(&ppool);
+	ffa_init(&ppool);
 
 	/* Initialise the API page pool. ppool will be empty from now on. */
 	api_init(&ppool);