refactor(ffa): remove `plat` prefix

Rename files to remove the `plat` prefix and replace with a
module-specific prefix.

Change-Id: Ie64cefcdf91da7b20e520828d8e234af12ab5c85
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/arch/aarch64/hypervisor/ffa.c b/src/arch/aarch64/hypervisor/ffa.c
index 880a5e86..cf44ee3 100644
--- a/src/arch/aarch64/hypervisor/ffa.c
+++ b/src/arch/aarch64/hypervisor/ffa.c
@@ -32,7 +32,7 @@
  */
 void arch_ffa_init(void)
 {
-	struct ffa_value ret = plat_ffa_spmc_id_get();
+	struct ffa_value ret = ffa_setup_spmc_id_get();
 
 	if (ret.func == FFA_SUCCESS_32) {
 		spmc_id = ret.arg2;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index d501eac..5b1021c 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -548,7 +548,7 @@
 		*args = api_yield(current, next, args);
 		return true;
 	case FFA_MSG_SEND_32:
-		*args = plat_ffa_msg_send(
+		*args = ffa_indirect_msg_send(
 			ffa_sender(*args), ffa_receiver(*args),
 			ffa_msg_send_size(*args), current, next);
 		return true;
@@ -564,7 +564,7 @@
 		struct vcpu_locked current_locked;
 
 		current_locked = vcpu_lock(current);
-		*args = plat_ffa_msg_recv(false, current_locked, next);
+		*args = ffa_indirect_msg_recv(false, current_locked, next);
 		vcpu_unlock(&current_locked);
 		return true;
 	}
@@ -697,7 +697,7 @@
 			return true;
 		}
 
-		plat_ffa_handle_secure_interrupt(current, next);
+		ffa_interrupts_handle_secure_interrupt(current, next);
 
 		/*
 		 * If the next vCPU belongs to an SP, the next time the NWd
@@ -778,7 +778,7 @@
 		 */
 		if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
 		    (*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
-			plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
+			ffa_notifications_sri_trigger_if_delayed(vcpu->cpu);
 		}
 #endif
 		if (func != FFA_VERSION_32) {
@@ -1032,12 +1032,12 @@
 	switch (args.func) {
 #if SECURE_WORLD == 1
 	case HF_INTERRUPT_DEACTIVATE:
-		vcpu->regs.r[0] = plat_ffa_interrupt_deactivate(
-			args.arg1, args.arg2, vcpu);
+		vcpu->regs.r[0] =
+			ffa_interrupts_deactivate(args.arg1, args.arg2, vcpu);
 		break;
 
 	case HF_INTERRUPT_RECONFIGURE:
-		vcpu->regs.r[0] = plat_ffa_interrupt_reconfigure(
+		vcpu->regs.r[0] = ffa_interrupts_reconfigure(
 			args.arg1, args.arg2, args.arg3, vcpu);
 		break;
 
@@ -1054,7 +1054,7 @@
 		struct vcpu_locked current_locked;
 
 		current_locked = vcpu_lock(vcpu);
-		vcpu->regs.r[0] = plat_ffa_interrupt_get(current_locked);
+		vcpu->regs.r[0] = ffa_interrupts_get(current_locked);
 		vcpu_unlock(&current_locked);
 		break;
 	}
@@ -1077,7 +1077,7 @@
 #if SECURE_WORLD == 1
 	struct vcpu *next = NULL;
 
-	plat_ffa_handle_secure_interrupt(current(), &next);
+	ffa_interrupts_handle_secure_interrupt(current(), &next);
 
 	/*
 	 * Since we are in interrupt context, set the bit for the
@@ -1140,7 +1140,7 @@
 	 */
 	assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED);
 
-	if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) {
+	if (ffa_vm_managed_exit_supported(current_vcpu->vm)) {
 		uint8_t pmr = plat_interrupts_get_priority_mask();
 
 		/*
@@ -1177,7 +1177,7 @@
 	 * Unwind Normal World Scheduled Call chain in response to NS
 	 * Interrupt.
 	 */
-	return plat_ffa_unwind_nwd_call_chain_interrupt(current_vcpu);
+	return ffa_interrupts_unwind_nwd_call_chain(current_vcpu);
 #else
 	return irq_lower();
 #endif
diff --git a/src/arch/aarch64/plat/psci/hypervisor.c b/src/arch/aarch64/plat/psci/hypervisor.c
index b7506d6..deae71c 100644
--- a/src/arch/aarch64/plat/psci/hypervisor.c
+++ b/src/arch/aarch64/plat/psci/hypervisor.c
@@ -70,7 +70,7 @@
 	/* Reset the registers to give a clean start for vCPU. */
 	arch_regs_reset(vcpu);
 
-	/* TODO: call plat_ffa_sri_init? */
+	/* TODO: call ffa_notifications_sri_init? */
 
 	return vcpu;
 }
diff --git a/src/arch/aarch64/plat/psci/spmc.c b/src/arch/aarch64/plat/psci/spmc.c
index bef8b95..6b5132f 100644
--- a/src/arch/aarch64/plat/psci/spmc.c
+++ b/src/arch/aarch64/plat/psci/spmc.c
@@ -72,7 +72,7 @@
 	arch_cpu_init(c);
 
 	/* Initialize SRI for running core. */
-	plat_ffa_sri_init(c);
+	ffa_notifications_sri_init(c);
 
 	vcpu = vm_get_vcpu(vm, vm_is_up(vm) ? 0 : cpu_index(c));
 	vcpu_locked = vcpu_lock(vcpu);
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 555bcf0..88aef4a 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -22,8 +22,8 @@
 {
 }
 
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
-				   uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+			      uint32_t share_func, bool multiple_borrower)
 {
 	(void)share_func;
 	(void)receiver;
@@ -33,9 +33,9 @@
 	return true;
 }
 
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
-				      ffa_id_t sender_vm_id,
-				      ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+					    ffa_id_t sender_vm_id,
+					    ffa_id_t receiver_vm_id)
 {
 	(void)current;
 	(void)sender_vm_id;
@@ -44,8 +44,9 @@
 	return true;
 }
 
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
-					  struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+						struct vm *receiver_vm,
+						uint32_t func)
 {
 	(void)sender_vm;
 	(void)receiver_vm;
@@ -54,9 +55,9 @@
 	return false;
 }
 
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
-				       ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+					     ffa_id_t sender_vm_id,
+					     ffa_id_t receiver_vm_id)
 {
 	(void)current;
 	(void)sender_vm_id;
@@ -65,8 +66,8 @@
 	return true;
 }
 
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
-			  struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+				struct ffa_value *ret)
 {
 	(void)vm_id;
 	(void)vcpu_idx;
@@ -75,19 +76,19 @@
 	return false;
 }
 
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
 {
 	(void)to_destroy_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
-				     struct ffa_value args,
-				     struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+					   struct ffa_value args,
+					   struct ffa_value *ret)
 {
 	(void)receiver_vm_id;
 	(void)args;
@@ -95,8 +96,8 @@
 	return false;
 }
 
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
-				 struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+				  struct ffa_value *ret)
 {
 	(void)vm_locked;
 	(void)ret;
@@ -104,8 +105,8 @@
 	return false;
 }
 
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
-				  struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+				   struct ffa_value *ret)
 {
 	(void)to_locked;
 	(void)ret;
@@ -113,8 +114,8 @@
 	return false;
 }
 
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
-					struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+				   struct vm_locked receiver_locked)
 {
 	(void)sender_locked;
 	(void)receiver_locked;
@@ -122,8 +123,9 @@
 	return false;
 }
 
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
-				struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+				    ffa_id_t sender_vm_id,
+				    struct ffa_value *ret)
 {
 	(void)receiver_vm_id;
 	(void)sender_vm_id;
@@ -132,14 +134,13 @@
 	return false;
 }
 
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
 {
 	return 0U;
 }
 
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
-					  ffa_id_t sender_id,
-					  ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+				     ffa_id_t receiver_id)
 {
 	(void)current;
 	(void)sender_id;
@@ -147,7 +148,7 @@
 	return false;
 }
 
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
 	ffa_id_t receiver_id, ffa_id_t sender_id,
 	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
 	bool is_bind, struct ffa_value *ret)
@@ -163,12 +164,12 @@
 	return false;
 }
 
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
 	ffa_id_t caller_id, const struct vm *target)
 {
 	(void)caller_id;
@@ -176,7 +177,7 @@
 	return 0;
 }
 
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
 {
 	(void)vm;
 	return false;
@@ -186,8 +187,8 @@
  * Check validity of the calls:
  * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
  */
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
-	struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+							  ffa_id_t vm_id)
 {
 	/*
 	 * Call should only be used by the Hypervisor, so any attempt of
@@ -199,9 +200,8 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
-					ffa_id_t sender_id,
-					ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+				    ffa_id_t receiver_id)
 {
 	(void)current;
 	(void)sender_id;
@@ -209,9 +209,8 @@
 	return false;
 }
 
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
-					ffa_id_t receiver_id,
-					ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+				    ffa_notification_flags_t flags)
 {
 	(void)flags;
 	(void)current;
@@ -219,7 +218,7 @@
 	return false;
 }
 
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
 	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
 	ffa_notifications_bitmap_t *from_sp)  // NOLINT
 {
@@ -230,7 +229,7 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
 	struct vm_locked receiver_locked,
 	ffa_notifications_bitmap_t *from_fwk,  // NOLINT
 	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
@@ -243,11 +242,11 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id,
-				       ffa_notification_flags_t flags,
-				       ffa_notifications_bitmap_t bitmap,
-				       struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+				   ffa_id_t receiver_vm_id,
+				   ffa_notification_flags_t flags,
+				   ffa_notifications_bitmap_t bitmap,
+				   struct ffa_value *ret)
 {
 	(void)sender_vm_id;
 	(void)receiver_vm_id;
@@ -258,8 +257,8 @@
 	return false;
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_create(
-	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+						 ffa_vcpu_count_t vcpu_count)
 {
 	(void)vm_id;
 	(void)vcpu_count;
@@ -267,26 +266,26 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
 {
 	(void)vm_id;
 
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return (struct vm_locked){.vm = NULL};
 }
 
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return (struct vm_locked){.vm = NULL};
 }
 
-bool plat_ffa_vm_notifications_info_get(     // NOLINTNEXTLINE
+bool ffa_vm_notifications_info_get(	     // NOLINTNEXTLINE
 	uint16_t *ids, uint32_t *ids_count,  // NOLINTNEXTLINE
 	uint32_t *lists_sizes,		     // NOLINTNEXTLINE
 	uint32_t *lists_count, const uint32_t ids_count_max)
@@ -300,13 +299,13 @@
 	return false;
 }
 
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
 {
 	(void)current;
 	return false;
 }
 
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
 {
 	(void)current;
 	return false;
@@ -315,9 +314,9 @@
 /**
  * Check if current VM can resume target VM/SP using FFA_RUN ABI.
  */
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
-			 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
-			 struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+			       ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+			       struct ffa_value *run_ret, struct vcpu **next)
 {
 	(void)current_locked;
 	(void)target_vm_id;
@@ -327,8 +326,8 @@
 	return true;
 }
 
-void plat_ffa_notification_info_get_forward(  // NOLINTNEXTLINE
-	uint16_t *ids, uint32_t *ids_count,   // NOLINTNEXTLINE
+void ffa_notifications_info_get_forward(     // NOLINTNEXTLINE
+	uint16_t *ids, uint32_t *ids_count,  // NOLINTNEXTLINE
 	uint32_t *lists_sizes, uint32_t *lists_count,
 	const uint32_t ids_count_max)
 {
@@ -339,22 +338,22 @@
 	(void)ids_count_max;
 }
 
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
 {
 	(void)cpu;
 }
 
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
 {
 	(void)cpu;
 }
 
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
 {
 	(void)cpu;
 }
 
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
 	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked)
 {
@@ -381,9 +380,9 @@
 	return true;
 }
 
-ffa_vm_count_t plat_ffa_partition_info_get_forward(  // NOLINTNEXTLINE
-	const struct ffa_uuid *uuid,		     // NOLINTNEXTLINE
-	uint32_t flags,				     // NOLINTNEXTLINE
+ffa_vm_count_t ffa_setup_partition_info_get_forward(  // NOLINTNEXTLINE
+	const struct ffa_uuid *uuid,		      // NOLINTNEXTLINE
+	uint32_t flags,				      // NOLINTNEXTLINE
 	struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
 {
 	(void)uuid;
@@ -393,13 +392,13 @@
 	return vm_count;
 }
 
-bool plat_ffa_is_secondary_ep_register_supported(void)
+bool ffa_setup_is_secondary_ep_register_supported(void)
 {
 	return false;
 }
 
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
-					   struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+	struct vcpu_locked current_locked, struct vcpu **next)
 {
 	(void)current_locked;
 	(void)next;
@@ -407,12 +406,11 @@
 	return (struct ffa_value){.func = FFA_INTERRUPT_32};
 }
 
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
-					     ffa_id_t vm_id,
-					     ffa_id_t receiver_vm_id,
-					     struct vcpu_locked receiver_locked,
-					     uint32_t func,  // NOLINTNEXTLINE
-					     enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+	struct vcpu_locked current_locked, ffa_id_t vm_id,
+	ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+	uint32_t func,	// NOLINTNEXTLINE
+	enum vcpu_state *next_state)
 {
 	/* Perform state transition checks only for Secure Partitions. */
 	(void)current_locked;
@@ -425,15 +423,15 @@
 	return true;
 }
 
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
-					 struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
 {
 	/* Scheduling mode not supported in the Hypervisor/VMs. */
 	(void)current_locked;
 	(void)target_locked;
 }
 
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
 {
@@ -443,7 +441,7 @@
 	(void)sender_vm_id;
 }
 
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
 	struct vcpu_locked current_locked, struct vcpu_locked next_locked)
 {
 	/* Calls chains not supported in the Hypervisor/VMs. */
@@ -457,14 +455,14 @@
 	return false;
 }
 
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
-					struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+					      struct vm_locked vm_locked)
 {
 	(void)current_locked;
 	(void)vm_locked;
 }
 
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
 	struct vm *from, uint32_t share_func,
 	struct ffa_memory_region **memory_region, uint32_t length,
 	uint32_t fragment_length, struct mpool *page_pool)
@@ -479,7 +477,7 @@
 	return (struct ffa_value){0};
 }
 
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
 	struct vm *to, ffa_memory_handle_t handle,
 	ffa_memory_region_flags_t flags, struct mpool *page_pool)
 {
@@ -491,7 +489,7 @@
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
 
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
 	struct vm *from, void *fragment, uint32_t fragment_length,
 	ffa_memory_handle_t handle, struct mpool *page_pool)
 {
@@ -504,9 +502,9 @@
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
 
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
-				   ffa_id_t receiver_vm_id, uint32_t size,
-				   struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+				       ffa_id_t receiver_vm_id, uint32_t size,
+				       struct vcpu *current, struct vcpu **next)
 {
 	(void)sender_vm_id;
 	(void)receiver_vm_id;
@@ -517,10 +515,10 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
-					struct vcpu **next,
-					uint32_t timeout_low,
-					uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+					      struct vcpu **next,
+					      uint32_t timeout_low,
+					      uint32_t timeout_high)
 {
 	(void)current_locked;
 	(void)next;
@@ -598,7 +596,7 @@
 	return true;
 }
 
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
 	ffa_memory_attributes_t attributes, uint32_t mode)
 {
 	(void)mode;
@@ -616,12 +614,12 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
 {
 	return false;
 }
 
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
@@ -642,7 +640,7 @@
 	return true;
 }
 
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
 {
 	(void)current_locked;