refactor(ffa): remove `plat` prefix

Rename functions and files to remove the `plat_ffa`/`plat` prefix and
replace it with a module-specific prefix (e.g. `ffa_setup_`,
`ffa_notifications_`, `ffa_direct_msg_`, `ffa_cpu_cycles_`).

Change-Id: Ie64cefcdf91da7b20e520828d8e234af12ab5c85
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/ffa/BUILD.gn b/src/ffa/BUILD.gn
index b43fba4..3b077bf 100644
--- a/src/ffa/BUILD.gn
+++ b/src/ffa/BUILD.gn
@@ -30,7 +30,6 @@
     "hypervisor/indirect_messaging.c",
     "hypervisor/interrupts.c",
     "hypervisor/notifications.c",
-    "hypervisor/power_management.c",
     "hypervisor/setup_and_discovery.c",
     "hypervisor/vm.c",
   ]
@@ -53,7 +52,6 @@
     "spmc/indirect_messaging.c",
     "spmc/interrupts.c",
     "spmc/notifications.c",
-    "spmc/power_management.c",
     "spmc/setup_and_discovery.c",
     "spmc/vm.c",
   ]
diff --git a/src/ffa/absent.c b/src/ffa/absent.c
index 9628b17..0cd9e1d 100644
--- a/src/ffa/absent.c
+++ b/src/ffa/absent.c
@@ -12,13 +12,13 @@
 #include "hf/vcpu.h"
 #include "hf/vm.h"
 
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
 {
 	return (struct ffa_value){.func = FFA_ERROR_32,
 				  .arg2 = FFA_NOT_SUPPORTED};
 }
 
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
 	ffa_id_t caller_id, const struct vm *target)
 {
 	(void)caller_id;
@@ -43,7 +43,7 @@
 /**
  * Check validity of the FF-A memory send function attempt.
  */
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
+bool ffa_memory_is_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
 {
 	(void)receiver_vm_id;
 	(void)share_func;
@@ -54,9 +54,9 @@
 /**
  * Check validity of a FF-A direct message request.
  */
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
-				      ffa_id_t sender_vm_id,
-				      ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+					    ffa_id_t sender_vm_id,
+					    ffa_id_t receiver_vm_id)
 {
 	(void)current;
 	(void)sender_vm_id;
@@ -65,8 +65,9 @@
 	return false;
 }
 
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
-					  struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+						struct vm *receiver_vm,
+						uint32_t func)
 {
 	(void)sender_vm;
 	(void)receiver_vm;
@@ -78,9 +79,9 @@
 /**
  * Check validity of a FF-A direct message response.
  */
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
-				       ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+					     ffa_id_t sender_vm_id,
+					     ffa_id_t receiver_vm_id)
 {
 	(void)current;
 	(void)sender_vm_id;
@@ -89,8 +90,8 @@
 	return false;
 }
 
-bool plat_ffa_is_notifications_bitmap_access_valid(struct vcpu *current,
-						   ffa_id_t vm_id)
+bool ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+					      ffa_id_t vm_id)
 {
 	(void)current;
 	(void)vm_id;
@@ -98,24 +99,24 @@
 	return false;
 }
 
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
 {
 	(void)to_destroy_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
-				     struct ffa_value args,
-				     struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+					   struct ffa_value args,
+					   struct ffa_value *ret)
 {
 	(void)receiver_vm_id;
 	(void)args;
@@ -124,8 +125,8 @@
 	return false;
 }
 
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
-				 struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+				  struct ffa_value *ret)
 {
 	(void)vm_locked;
 	(void)ret;
@@ -133,8 +134,8 @@
 	return false;
 }
 
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
-				  struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+				   struct ffa_value *ret)
 {
 	(void)to_locked;
 	(void)ret;
@@ -142,8 +143,8 @@
 	return false;
 }
 
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
-					struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+				   struct vm_locked receiver_locked)
 {
 	(void)sender_locked;
 	(void)receiver_locked;
@@ -151,8 +152,9 @@
 	return false;
 }
 
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
-				struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+				    ffa_id_t sender_vm_id,
+				    struct ffa_value *ret)
 {
 	(void)receiver_vm_id;
 	(void)sender_vm_id;
@@ -161,14 +163,13 @@
 	return false;
 }
 
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
 {
 	return 0U;
 }
 
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
-					  ffa_id_t sender_id,
-					  ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+				     ffa_id_t receiver_id)
 {
 	(void)current;
 	(void)sender_id;
@@ -176,7 +177,7 @@
 	return false;
 }
 
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
 	ffa_id_t receiver_id, ffa_id_t sender_id,
 	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
 	bool is_bind, struct ffa_value *ret)
@@ -192,9 +193,8 @@
 	return false;
 }
 
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
-					ffa_id_t sender_id,
-					ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+				    ffa_id_t receiver_id)
 {
 	(void)current;
 	(void)sender_id;
@@ -202,9 +202,8 @@
 	return false;
 }
 
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
-					ffa_id_t receiver_id,
-					ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+				    ffa_notification_flags_t flags)
 {
 	(void)flags;
 	(void)current;
@@ -212,7 +211,7 @@
 	return false;
 }
 
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
 	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
 	const ffa_notifications_bitmap_t *from_sp)
 {
@@ -223,7 +222,7 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
 	struct vm_locked receiver_locked,  // NOLINTNEXTLINE
 	ffa_notifications_bitmap_t *from_fwk, ffa_notification_flags_t flags)
 {
@@ -234,11 +233,11 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id,
-				       ffa_notification_flags_t flags,
-				       ffa_notifications_bitmap_t bitmap,
-				       struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+				   ffa_id_t receiver_vm_id,
+				   ffa_notification_flags_t flags,
+				   ffa_notifications_bitmap_t bitmap,
+				   struct ffa_value *ret)
 {
 	(void)sender_vm_id;
 	(void)receiver_vm_id;
@@ -249,8 +248,8 @@
 	return false;
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_create(
-	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+						 ffa_vcpu_count_t vcpu_count)
 {
 	(void)vm_id;
 	(void)vcpu_count;
@@ -258,8 +257,8 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
-					       ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+					  ffa_vcpu_count_t vcpu_count)
 {
 	(void)vm_id;
 	(void)vcpu_count;
@@ -267,27 +266,27 @@
 	return false;
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
 {
 	(void)vm_id;
 
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return (struct vm_locked){.vm = NULL};
 }
 
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
 {
 	(void)vm_id;
 	return (struct vm_locked){.vm = NULL};
 }
 
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
-			  struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+				struct ffa_value *ret)
 {
 	(void)vm_id;
 	(void)vcpu_idx;
@@ -296,11 +295,11 @@
 	return false;
 }
 
-bool plat_ffa_vm_notifications_info_get(const uint16_t *ids,
-					const uint32_t *ids_count,
-					const uint32_t *lists_sizes,
-					const uint32_t *lists_count,
-					const uint32_t ids_count_max)
+bool ffa_vm_notifications_info_get(const uint16_t *ids,
+				   const uint32_t *ids_count,
+				   const uint32_t *lists_sizes,
+				   const uint32_t *lists_count,
+				   const uint32_t ids_count_max)
 {
 	(void)ids;
 	(void)ids_count;
@@ -311,13 +310,13 @@
 	return false;
 }
 
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
 {
 	(void)current;
 	return false;
 }
 
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
 {
 	(void)current;
 	return false;
@@ -326,9 +325,9 @@
 /**
  * Check if current VM can resume target VM/SP using FFA_RUN ABI.
  */
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
-			 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
-			 struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+			       ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+			       struct ffa_value *run_ret, struct vcpu **next)
 {
 	(void)current_locked;
 	(void)target_vm_id;
@@ -338,9 +337,9 @@
 	return true;
 }
 
-struct ffa_value plat_ffa_handle_secure_interrupt(struct vcpu *current,
-						  struct vcpu **next,
-						  bool from_normal_world)
+struct ffa_value ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+							struct vcpu **next,
+							bool from_normal_world)
 {
 	(void)current;
 	(void)next;
@@ -354,11 +353,11 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-void plat_ffa_notification_info_get_forward(const uint16_t *ids,
-					    const uint32_t *ids_count,
-					    const uint32_t *lists_sizes,
-					    const uint32_t *lists_count,
-					    const uint32_t ids_count_max)
+void ffa_notifications_info_get_forward(const uint16_t *ids,
+					const uint32_t *ids_count,
+					const uint32_t *lists_sizes,
+					const uint32_t *lists_count,
+					const uint32_t ids_count_max)
 {
 	(void)ids;
 	(void)ids_count;
@@ -367,11 +366,11 @@
 	(void)ids_count_max;
 }
 
-void plat_ffa_sri_init(void)
+void ffa_notifications_sri_init(void)
 {
 }
 
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
 	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked)
 {
@@ -382,9 +381,9 @@
 	return false;
 }
 
-bool plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
-					 const ffa_notification_flags_t flags,
-					 const ffa_vm_count_t *ret_count)
+bool ffa_setup_partition_info_get_forward(const struct ffa_uuid *uuid,
+					  const ffa_notification_flags_t flags,
+					  const ffa_vm_count_t *ret_count)
 {
 	(void)uuid;
 	(void)flags;
@@ -410,11 +409,11 @@
 	return true;
 }
 
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
-				       paddr_t fdt_addr,
-				       size_t fdt_allocated_size,
-				       const struct manifest_vm *manifest_vm,
-				       struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+					paddr_t fdt_addr,
+					size_t fdt_allocated_size,
+					const struct manifest_vm *manifest_vm,
+					struct mpool *ppool)
 {
 	(void)stage1_locked;
 	(void)fdt_addr;
@@ -423,8 +422,8 @@
 	(void)ppool;
 }
 
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
-					   struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+	struct vcpu_locked current_locked, struct vcpu **next)
 {
 	(void)current_locked;
 	(void)next;
@@ -432,12 +431,11 @@
 	return (struct ffa_value){.func = FFA_INTERRUPT_32};
 }
 
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
-					     ffa_id_t vm_id,
-					     ffa_id_t receiver_vm_id,
-					     struct vcpu_locked receiver_locked,
-					     uint32_t func,  // NOLINTNEXTLINE
-					     enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+	struct vcpu_locked current_locked, ffa_id_t vm_id,
+	ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+	uint32_t func,	// NOLINTNEXTLINE
+	enum vcpu_state *next_state)
 {
 	(void)current_locked;
 	(void)vm_id;
@@ -449,14 +447,14 @@
 	return true;
 }
 
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
-					 struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_runeld_prepare(
+	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
 {
 	(void)current_locked;
 	(void)target_locked;
 }
 
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
 {
@@ -471,21 +469,21 @@
 	return false;
 }
 
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
 	struct vcpu_locked current_locked, struct vcpu_locked next_locked)
 {
 	(void)current_locked;
 	(void)next_locked;
 }
 
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
-					struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+					      struct vm_locked vm_locked)
 {
 	(void)current_locked;
 	(void)vm_locked;
 }
 
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
 	struct vm *from, struct ffa_memory_region *memory_region,
 	uint32_t length, uint32_t fragment_length, struct mpool *page_pool)
 {
@@ -498,7 +496,7 @@
 	return (struct ffa_value){.func = FFA_ERROR_32};
 }
 
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
 	struct vm *to, ffa_memory_handle_t handle,
 	ffa_memory_region_flags_t flags, struct mpool *page_pool)
 {
@@ -510,7 +508,7 @@
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
 
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
 	struct vm *from, void *fragment, uint32_t fragment_length,
 	ffa_memory_handle_t handle, struct mpool *page_pool)
 {
@@ -523,9 +521,9 @@
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
 
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
-				   ffa_id_t receiver_vm_id, uint32_t size,
-				   struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+				       ffa_id_t receiver_vm_id, uint32_t size,
+				       struct vcpu *current, struct vcpu **next)
 {
 	(void)sender_vm_id;
 	(void)receiver_vm_id;
@@ -536,10 +534,10 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_yield_prepare(struct vcpu current_locked,
-					struct vcpu **next,
-					uint32_t timeout_low,
-					uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu current_locked,
+					      struct vcpu **next,
+					      uint32_t timeout_low,
+					      uint32_t timeout_high)
 {
 	(void)current_locked;
 	(void)next;
@@ -559,8 +557,8 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_msg_recv(bool block, struct vcpu *current,
-				   struct vcpu **next)
+struct ffa_value ffa_indirect_msg_recv(bool block, struct vcpu *current,
+				       struct vcpu **next)
 {
 	(void)block;
 	(void)current;
@@ -569,17 +567,17 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
 {
 	return false;
 }
 
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
 {
 	return api_interrupt_get(current_locked);
 }
diff --git a/src/ffa/hypervisor.c b/src/ffa/hypervisor.c
index 20c6fbe..d79d14b 100644
--- a/src/ffa/hypervisor.c
+++ b/src/ffa/hypervisor.c
@@ -94,7 +94,7 @@
 	 * perspective and vice-versa.
 	 */
 	dlog_verbose("Setting up buffers for TEE.\n");
-	plat_ffa_rxtx_map_spmc(
+	ffa_setup_rxtx_map_spmc(
 		pa_from_va(va_from_ptr(other_world_vm->mailbox.recv)),
 		pa_from_va(va_from_ptr(other_world_vm->mailbox.send)),
 		HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
@@ -120,9 +120,9 @@
 	dlog_verbose("TEE finished setting up buffers.\n");
 }
 
-bool plat_ffa_intercept_call(struct vcpu_locked current_locked,
-			     struct vcpu_locked next_locked,
-			     struct ffa_value *signal_interrupt)
+bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
+				   struct vcpu_locked next_locked,
+				   struct ffa_value *signal_interrupt)
 {
 	(void)current_locked;
 	(void)next_locked;
@@ -200,9 +200,9 @@
  * If the recipient's receive buffer is busy, it can optionally register the
  * caller to be notified when the recipient's receive buffer becomes available.
  */
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
-				   ffa_id_t receiver_vm_id, uint32_t size,
-				   struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+				       ffa_id_t receiver_vm_id, uint32_t size,
+				       struct vcpu *current, struct vcpu **next)
 {
 	struct vm *from = current->vm;
 	struct vm *to;
diff --git a/src/ffa/hypervisor/cpu_cycles.c b/src/ffa/hypervisor/cpu_cycles.c
index 667e0e9..af54f19 100644
--- a/src/ffa/hypervisor/cpu_cycles.c
+++ b/src/ffa/hypervisor/cpu_cycles.c
@@ -12,8 +12,8 @@
 #include "hf/ffa/indirect_messaging.h"
 #include "hf/vcpu.h"
 
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
-			  struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+				struct ffa_value *ret)
 {
 	/*
 	 * VM's requests should be forwarded to the SPMC, if target is an SP.
@@ -30,9 +30,9 @@
 /**
  * Check if current VM can resume target VM/SP using FFA_RUN ABI.
  */
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
-			 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
-			 struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+			       ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+			       struct ffa_value *run_ret, struct vcpu **next)
 {
 	(void)next;
 	(void)vcpu_idx;
@@ -56,18 +56,16 @@
  * to be compliant with version v1.0 of the FF-A specification. It serves as
  * a blocking call.
  */
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
-					   struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+	struct vcpu_locked current_locked, struct vcpu **next)
 {
-	return plat_ffa_msg_recv(true, current_locked, next);
+	return ffa_indirect_msg_recv(true, current_locked, next);
 }
 
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
-					     ffa_id_t vm_id,
-					     ffa_id_t receiver_vm_id,
-					     struct vcpu_locked receiver_locked,
-					     uint32_t func,
-					     enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+	struct vcpu_locked current_locked, ffa_id_t vm_id,
+	ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+	uint32_t func, enum vcpu_state *next_state)
 {
 	(void)current_locked;
 	(void)vm_id;
@@ -95,8 +93,8 @@
 	}
 }
 
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
-					 struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_runeld_prepare(
+	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
 {
 	/* Scheduling mode not supported in the Hypervisor/VMs. */
 	(void)current_locked;
@@ -107,10 +105,10 @@
  * Prepare to yield execution back to the VM that allocated cpu cycles and move
  * to BLOCKED state.
  */
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
-					struct vcpu **next,
-					uint32_t timeout_low,
-					uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+					      struct vcpu **next,
+					      uint32_t timeout_low,
+					      uint32_t timeout_high)
 {
 	struct vcpu *current = current_locked.vcpu;
 	struct ffa_value ret = {
diff --git a/src/ffa/hypervisor/direct_messaging.c b/src/ffa/hypervisor/direct_messaging.c
index 2b78f4b..c5b5292 100644
--- a/src/ffa/hypervisor/direct_messaging.c
+++ b/src/ffa/hypervisor/direct_messaging.c
@@ -18,9 +18,9 @@
 /**
  * Check validity of a FF-A direct message request.
  */
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
-				      ffa_id_t sender_vm_id,
-				      ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+					    ffa_id_t sender_vm_id,
+					    ffa_id_t receiver_vm_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -33,8 +33,9 @@
 	       sender_vm_id == current_vm_id && vm_is_primary(current->vm);
 }
 
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
-					  struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+						struct vm *receiver_vm,
+						uint32_t func)
 {
 	(void)sender_vm;
 	(void)receiver_vm;
@@ -50,9 +51,9 @@
 /**
  * Check validity of a FF-A direct message response.
  */
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
-				       ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+					     ffa_id_t sender_vm_id,
+					     ffa_id_t receiver_vm_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -65,9 +66,9 @@
 	       receiver_vm_id == HF_PRIMARY_VM_ID;
 }
 
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
-				     struct ffa_value args,
-				     struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+					   struct ffa_value args,
+					   struct ffa_value *ret)
 {
 	if (!plat_ffa_is_tee_enabled()) {
 		dlog_verbose("Not forwarding: ffa_tee_enabled is false\n");
@@ -101,7 +102,7 @@
 	return true;
 }
 
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
 {
@@ -111,7 +112,7 @@
 	(void)sender_vm_id;
 }
 
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
 	struct vcpu_locked current_locked, struct vcpu_locked next_locked)
 {
 	/* Calls chains not supported in the Hypervisor/VMs. */
diff --git a/src/ffa/hypervisor/ffa_memory.c b/src/ffa/hypervisor/ffa_memory.c
index dfc5985..84cbcd4 100644
--- a/src/ffa/hypervisor/ffa_memory.c
+++ b/src/ffa/hypervisor/ffa_memory.c
@@ -18,7 +18,7 @@
 #include "hypervisor.h"
 #include "sysregs.h"
 
-enum ffa_memory_handle_allocator plat_ffa_memory_handle_allocator(void)
+enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
 {
 	return FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
 }
@@ -37,8 +37,8 @@
 /**
  * Check validity of the FF-A memory send function attempt.
  */
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
-				   uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+			      uint32_t share_func, bool multiple_borrower)
 {
 	/*
 	 * Currently memory interfaces are not forwarded from hypervisor to
@@ -54,18 +54,18 @@
 	return true;
 }
 
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
 {
 	return 0U;
 }
 
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
 {
 	(void)current;
 	return has_vhe_support();
 }
 
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
 {
 	(void)current;
 	return has_vhe_support();
@@ -263,7 +263,7 @@
 	return ret;
 }
 
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
 	struct vm *from, uint32_t share_func,
 	struct ffa_memory_region **memory_region, uint32_t length,
 	uint32_t fragment_length, struct mpool *page_pool)
@@ -417,7 +417,7 @@
 	return ret;
 }
 
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
 	struct vm *to, ffa_memory_handle_t handle,
 	ffa_memory_region_flags_t flags, struct mpool *page_pool)
 {
@@ -655,7 +655,7 @@
 	return ret;
 }
 
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
 	struct vm *from, void *fragment, uint32_t fragment_length,
 	ffa_memory_handle_t handle, struct mpool *page_pool)
 {
@@ -684,7 +684,7 @@
 	return ret;
 }
 
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
 	ffa_memory_attributes_t attributes, uint32_t mode)
 {
 	(void)mode;
diff --git a/src/ffa/hypervisor/indirect_messaging.c b/src/ffa/hypervisor/indirect_messaging.c
index 0fd2b6c..fb11f9f 100644
--- a/src/ffa/hypervisor/indirect_messaging.c
+++ b/src/ffa/hypervisor/indirect_messaging.c
@@ -14,8 +14,8 @@
 #include "hf/ffa_internal.h"
 #include "hf/vm.h"
 
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
-					struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+				   struct vm_locked receiver_locked)
 {
 	(void)sender_locked;
 	(void)receiver_locked;
@@ -27,8 +27,9 @@
 	return true;
 }
 
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
-				struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+				    ffa_id_t sender_vm_id,
+				    struct ffa_value *ret)
 {
 	/* FFA_MSG_SEND2 is forwarded to SPMC when the receiver is an SP. */
 	if (vm_id_is_current_world(receiver_vm_id)) {
@@ -59,7 +60,7 @@
  * Checks whether the vCPU's attempt to wait for a message has already been
  * interrupted or whether it is allowed to block.
  */
-static bool plat_ffa_msg_recv_block_interrupted(
+static bool ffa_indirect_msg_recv_block_interrupted(
 	struct vcpu_locked current_locked)
 {
 	bool interrupted;
@@ -98,9 +99,9 @@
  *
  * No new messages can be received until the mailbox has been cleared.
  */
-struct ffa_value plat_ffa_msg_recv(bool block,
-				   struct vcpu_locked current_locked,
-				   struct vcpu **next)
+struct ffa_value ffa_indirect_msg_recv(bool block,
+				       struct vcpu_locked current_locked,
+				       struct vcpu **next)
 {
 	struct vm *vm = current_locked.vcpu->vm;
 	struct vcpu *current = current_locked.vcpu;
@@ -143,7 +144,7 @@
 	 * that time to FFA_SUCCESS.
 	 */
 	return_code = ffa_error(FFA_INTERRUPTED);
-	if (plat_ffa_msg_recv_block_interrupted(current_locked)) {
+	if (ffa_indirect_msg_recv_block_interrupted(current_locked)) {
 		goto out;
 	}
 
diff --git a/src/ffa/hypervisor/interrupts.c b/src/ffa/hypervisor/interrupts.c
index 512cc57..21d89ed 100644
--- a/src/ffa/hypervisor/interrupts.c
+++ b/src/ffa/hypervisor/interrupts.c
@@ -10,7 +10,8 @@
 #include "hf/check.h"
 #include "hf/vm.h"
 
-void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next)
+void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+					    struct vcpu **next)
 {
 	(void)current;
 	(void)next;
@@ -22,7 +23,7 @@
 	CHECK(false);
 }
 
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
 	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked)
 {
@@ -36,8 +37,8 @@
 /**
  * Enable relevant virtual interrupts for VMs.
  */
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
-					struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+					      struct vm_locked vm_locked)
 {
 	struct vcpu *current;
 	struct interrupts *interrupts;
@@ -51,7 +52,7 @@
 	}
 }
 
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
 {
 	return api_interrupt_get(current_locked);
 }
diff --git a/src/ffa/hypervisor/notifications.c b/src/ffa/hypervisor/notifications.c
index 69ea010..f03f378 100644
--- a/src/ffa/hypervisor/notifications.c
+++ b/src/ffa/hypervisor/notifications.c
@@ -20,8 +20,8 @@
  * Check validity of the calls:
  * FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
  */
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
-	struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+							  ffa_id_t vm_id)
 {
 	/*
 	 * Call should only be used by the Hypervisor, so any attempt of
@@ -33,16 +33,15 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
-					  ffa_id_t sender_id,
-					  ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+				     ffa_id_t receiver_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 	/** If Hafnium is hypervisor, receiver needs to be current vm. */
 	return sender_id != receiver_id && current_vm_id == receiver_id;
 }
 
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
 	ffa_id_t receiver_id, ffa_id_t sender_id,
 	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
 	bool is_bind, struct ffa_value *ret)
@@ -66,9 +65,8 @@
 	return false;
 }
 
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
-					ffa_id_t sender_id,
-					ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+				    ffa_id_t receiver_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -76,10 +74,10 @@
 	return sender_id == current_vm_id && sender_id != receiver_id;
 }
 
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id, uint32_t flags,
-				       ffa_notifications_bitmap_t bitmap,
-				       struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+				   ffa_id_t receiver_vm_id, uint32_t flags,
+				   ffa_notifications_bitmap_t bitmap,
+				   struct ffa_value *ret)
 {
 	/* Forward only if receiver is an SP. */
 	if (vm_id_is_current_world(receiver_vm_id)) {
@@ -103,9 +101,8 @@
 	return true;
 }
 
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
-					ffa_id_t receiver_id,
-					ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+				    ffa_notification_flags_t flags)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -115,8 +112,8 @@
 	return (current_vm_id == receiver_id);
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_create(
-	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+						 ffa_vcpu_count_t vcpu_count)
 {
 	(void)vm_id;
 	(void)vcpu_count;
@@ -124,15 +121,15 @@
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
 {
 	(void)vm_id;
 
 	return ffa_error(FFA_NOT_SUPPORTED);
 }
 
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
-					       ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+					  ffa_vcpu_count_t vcpu_count)
 {
 	struct ffa_value ret;
 
@@ -155,10 +152,10 @@
 	return true;
 }
 
-void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
-					    uint32_t *lists_sizes,
-					    uint32_t *lists_count,
-					    const uint32_t ids_count_max)
+void ffa_notifications_info_get_forward(uint16_t *ids, uint32_t *ids_count,
+					uint32_t *lists_sizes,
+					uint32_t *lists_count,
+					const uint32_t ids_count_max)
 {
 	CHECK(ids != NULL);
 	CHECK(ids_count != NULL);
@@ -228,7 +225,7 @@
 		 sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
 }
 
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
 	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
 	ffa_notifications_bitmap_t *from_sp)
 {
@@ -252,7 +249,7 @@
 	return ret;
 }
 
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
 	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
 	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
 {
@@ -292,12 +289,12 @@
  * intrastructure that encompasses the NWd, and we are not interested in testing
  * the flow of notifications between VMs only.
  */
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
 {
 	(void)cpu;
 }
 
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
 {
 	(void)cpu;
 }
@@ -306,7 +303,7 @@
  * Track that in current CPU there was a notification set with delay SRI
  * flag.
  */
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
 {
 	(void)cpu;
 }
diff --git a/src/ffa/hypervisor/power_management.c b/src/ffa/hypervisor/power_management.c
deleted file mode 100644
index e1e81b2..0000000
--- a/src/ffa/hypervisor/power_management.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2024 The Hafnium Authors.
- *
- * Use of this source code is governed by a BSD-style
- * license that can be found in the LICENSE file or at
- * https://opensource.org/licenses/BSD-3-Clause.
- */
-
-#include "hf/ffa/power_management.h"
-
-/**
- * Returns FFA_ERROR as FFA_SECONDARY_EP_REGISTER is not supported at the
- * non-secure FF-A instances.
- */
-bool plat_ffa_is_secondary_ep_register_supported(void)
-{
-	return false;
-}
diff --git a/src/ffa/hypervisor/setup_and_discovery.c b/src/ffa/hypervisor/setup_and_discovery.c
index da9ebf7..74afa60 100644
--- a/src/ffa/hypervisor/setup_and_discovery.c
+++ b/src/ffa/hypervisor/setup_and_discovery.c
@@ -10,7 +10,7 @@
 
 #include "hf/arch/other_world.h"
 
-#include "hf/ffa.h"
+#include "hf/check.h"
 #include "hf/ffa/vm.h"
 #include "hf/manifest.h"
 #include "hf/vm.h"
@@ -18,7 +18,7 @@
 #include "hypervisor.h"
 #include "smc.h"
 
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
 {
 	if (plat_ffa_is_tee_enabled()) {
 		/*
@@ -35,7 +35,16 @@
 				  .arg2 = FFA_NOT_SUPPORTED};
 }
 
-void plat_ffa_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
+/**
+ * Returns FFA_ERROR as FFA_SECONDARY_EP_REGISTER is not supported at the
+ * non-secure FF-A instances.
+ */
+bool ffa_setup_is_secondary_ep_register_supported(void)
+{
+	return false;
+}
+
+void ffa_setup_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
 {
 	struct ffa_value ret;
 
@@ -46,7 +55,7 @@
 	CHECK(ret.func == FFA_SUCCESS_32);
 }
 
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
 {
 	struct vm *vm = vm_locked.vm;
 	struct vm *other_world;
@@ -56,7 +65,7 @@
 		return;
 	}
 
-	if (!plat_ffa_vm_supports_indirect_messages(vm)) {
+	if (!ffa_vm_supports_indirect_messages(vm)) {
 		return;
 	}
 
@@ -71,14 +80,14 @@
 		vm->id, (uintptr_t)vm->mailbox.recv,
 		(uintptr_t)vm->mailbox.send);
 
-	plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
+	ffa_setup_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
 
 	vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
 
 	dlog_verbose("Mailbox of %x owned by SPMC.\n", vm_locked.vm->id);
 }
 
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
 	struct ffa_value ret;
 	uint64_t func;
@@ -92,7 +101,7 @@
 		return;
 	}
 
-	if (!plat_ffa_vm_supports_indirect_messages(vm_locked.vm)) {
+	if (!ffa_vm_supports_indirect_messages(vm_locked.vm)) {
 		return;
 	}
 
@@ -112,7 +121,7 @@
 	}
 }
 
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
 {
 	/*
 	 * Allow forwarding from the Hypervisor if TEE or SPMC exists and
@@ -125,7 +134,7 @@
  * Forward helper for FFA_PARTITION_INFO_GET.
  * Emits FFA_PARTITION_INFO_GET from Hypervisor to SPMC if allowed.
  */
-ffa_vm_count_t plat_ffa_partition_info_get_forward(
+ffa_vm_count_t ffa_setup_partition_info_get_forward(
 	const struct ffa_uuid *uuid, uint32_t flags,
 	struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
 {
@@ -185,12 +194,12 @@
 	return vm_count;
 }
 
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
-				       paddr_t fdt_addr,
-				       size_t fdt_allocated_size,
-				       const struct manifest_vm *manifest_vm,
-				       const struct boot_params *boot_params,
-				       struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+					paddr_t fdt_addr,
+					size_t fdt_allocated_size,
+					const struct manifest_vm *manifest_vm,
+					const struct boot_params *boot_params,
+					struct mpool *ppool)
 {
 	struct fdt partition_fdt;
 
@@ -214,7 +223,7 @@
 		       pa_add(fdt_addr, fdt_allocated_size), ppool) == true);
 }
 
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
 	ffa_id_t caller_id, const struct vm *target)
 {
 	ffa_partition_properties_t result = target->messaging_method;
@@ -234,14 +243,14 @@
 	return result;
 }
 
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
-				 struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+				  struct ffa_value *ret)
 {
 	struct vm *vm = vm_locked.vm;
 	ffa_id_t vm_id = vm->id;
 
 	if (!plat_ffa_is_tee_enabled() ||
-	    !plat_ffa_vm_supports_indirect_messages(vm)) {
+	    !ffa_vm_supports_indirect_messages(vm)) {
 		return false;
 	}
 
@@ -274,8 +283,8 @@
  *
  * Returns true if the ownership belongs to the hypervisor.
  */
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
-				  struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+				   struct ffa_value *ret)
 {
 	struct ffa_value other_world_ret;
 
@@ -286,7 +295,7 @@
 	 * - If the mailbox ownership hasn't been transferred to the SPMC.
 	 */
 	if (!plat_ffa_is_tee_enabled() ||
-	    !plat_ffa_vm_supports_indirect_messages(to_locked.vm) ||
+	    !ffa_vm_supports_indirect_messages(to_locked.vm) ||
 	    to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
 		return true;
 	}
diff --git a/src/ffa/hypervisor/vm.c b/src/ffa/hypervisor/vm.c
index 943ae66..08b1cff 100644
--- a/src/ffa/hypervisor/vm.c
+++ b/src/ffa/hypervisor/vm.c
@@ -8,20 +8,20 @@
 
 #include "hf/vm.h"
 
-bool plat_ffa_vm_supports_indirect_messages(struct vm *vm)
+bool ffa_vm_supports_indirect_messages(struct vm *vm)
 {
 	return vm->ffa_version >= FFA_VERSION_1_1 &&
 	       vm_supports_messaging_method(vm, FFA_PARTITION_INDIRECT_MSG);
 }
 
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
 {
 	(void)vm;
 
 	return false;
 }
 
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
 {
 	if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
 		return vm_find_locked(vm_id);
@@ -30,12 +30,12 @@
 	return (struct vm_locked){.vm = NULL};
 }
 
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
 {
-	return plat_ffa_vm_find_locked(vm_id);
+	return ffa_vm_find_locked(vm_id);
 }
 
-bool plat_ffa_vm_notifications_info_get(     // NOLINTNEXTLINE
+bool ffa_vm_notifications_info_get(	     // NOLINTNEXTLINE
 	uint16_t *ids, uint32_t *ids_count,  // NOLINTNEXTLINE
 	uint32_t *lists_sizes,		     // NOLINTNEXTLINE
 	uint32_t *lists_count, const uint32_t ids_count_max)
@@ -49,13 +49,13 @@
 	return false;
 }
 
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
 {
 	/* Hypervisor never frees VM structs. */
 	(void)to_destroy_locked;
 }
 
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
diff --git a/src/ffa/spmc.c b/src/ffa/spmc.c
index ba59981..1b31a1a 100644
--- a/src/ffa/spmc.c
+++ b/src/ffa/spmc.c
@@ -38,7 +38,7 @@
 void plat_ffa_init(struct mpool *ppool)
 {
 	arch_ffa_init();
-	plat_ffa_vm_init(ppool);
+	ffa_vm_init(ppool);
 }
 
 static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
@@ -252,12 +252,10 @@
  * the current vcpu would transition upon the FF-A ABI invocation as determined
  * by the Partition runtime model.
  */
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
-					     ffa_id_t vm_id,
-					     ffa_id_t receiver_vm_id,
-					     struct vcpu_locked locked_vcpu,
-					     uint32_t func,
-					     enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+	struct vcpu_locked current_locked, ffa_id_t vm_id,
+	ffa_id_t receiver_vm_id, struct vcpu_locked locked_vcpu, uint32_t func,
+	enum vcpu_state *next_state)
 {
 	bool allowed = false;
 	struct vcpu *current = current_locked.vcpu;
@@ -421,9 +419,9 @@
 	target_vcpu->requires_deactivate_call = false;
 }
 
-bool plat_ffa_intercept_call(struct vcpu_locked current_locked,
-			     struct vcpu_locked next_locked,
-			     struct ffa_value *signal_interrupt)
+bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
+				   struct vcpu_locked next_locked,
+				   struct ffa_value *signal_interrupt)
 {
 	uint32_t intid;
 
@@ -462,7 +460,7 @@
  * invocation of FFA_MSG_SEND_DIRECT_REQ or FFA_MSG_SEND_DIRECT_REQ2 (FF-A v1.2)
  * ABI.
  */
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
 	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
 {
@@ -498,7 +496,7 @@
  * we need to return other world's id so that the SPMC can
  * return to the SPMD.
  */
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
 	struct vcpu_locked current_locked, struct vcpu_locked next_locked)
 {
 	struct vcpu *next = next_locked.vcpu;
@@ -521,9 +519,9 @@
 	}
 }
 
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
-				   ffa_id_t receiver_vm_id, uint32_t size,
-				   struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+				       ffa_id_t receiver_vm_id, uint32_t size,
+				       struct vcpu *current, struct vcpu **next)
 {
 	(void)sender_vm_id;
 	(void)receiver_vm_id;
@@ -560,7 +558,7 @@
 		atomic_store_explicit(&current->vm->aborting, true,
 				      memory_order_relaxed);
 
-		plat_ffa_free_vm_resources(vm_locked);
+		ffa_vm_free_resources(vm_locked);
 
 		if (sp_boot_next(current_locked, next)) {
 			goto out;
diff --git a/src/ffa/spmc/cpu_cycles.c b/src/ffa/spmc/cpu_cycles.c
index 0d1a99f..db152db 100644
--- a/src/ffa/spmc/cpu_cycles.c
+++ b/src/ffa/spmc/cpu_cycles.c
@@ -18,8 +18,8 @@
 void plat_ffa_vcpu_allow_interrupts(struct vcpu *current);
 bool sp_boot_next(struct vcpu_locked current_locked, struct vcpu **next);
 
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
-			  struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+				struct ffa_value *ret)
 {
 	(void)vm_id;
 	(void)vcpu_idx;
@@ -31,9 +31,9 @@
 /**
  * Check if current VM can resume target VM using FFA_RUN ABI.
  */
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
-			 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
-			 struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+			       ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+			       struct ffa_value *run_ret, struct vcpu **next)
 {
 	/*
 	 * Under the Partition runtime model specified in FF-A v1.1-Beta0 spec,
@@ -299,8 +299,8 @@
 	 * vCPU. Intercept call will set `ret` to FFA_INTERRUPT and the
 	 * respective interrupt id.
 	 */
-	if (plat_ffa_intercept_call(both_vcpu_locks.vcpu1,
-				    both_vcpu_locks.vcpu2, ffa_ret)) {
+	if (ffa_interrupts_intercept_call(both_vcpu_locks.vcpu1,
+					  both_vcpu_locks.vcpu2, ffa_ret)) {
 		*next = NULL;
 		ret = true;
 	}
@@ -316,8 +316,8 @@
  * from RUNNING to WAITING for the following Partition runtime models:
  * RTM_FFA_RUN, RTM_SEC_INTERRUPT, RTM_SP_INIT.
  */
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
-					   struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+	struct vcpu_locked current_locked, struct vcpu **next)
 {
 	struct ffa_value ret = api_ffa_interrupt_return(0);
 	struct vcpu *current = current_locked.vcpu;
@@ -417,8 +417,8 @@
  * Initialize the scheduling mode and/or Partition Runtime model of the target
  * SP upon being resumed by an FFA_RUN ABI.
  */
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
-					 struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+	struct vcpu_locked current_locked, struct vcpu_locked target_locked)
 {
 	struct vcpu *vcpu = target_locked.vcpu;
 	struct vcpu *current = current_locked.vcpu;
@@ -454,10 +454,10 @@
  * execution context by the SPMC to handle secure virtual interrupt, then
  * FFA_YIELD invocation is essentially a no-op.
  */
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
-					struct vcpu **next,
-					uint32_t timeout_low,
-					uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+					      struct vcpu **next,
+					      uint32_t timeout_low,
+					      uint32_t timeout_high)
 {
 	struct ffa_value ret_args = (struct ffa_value){.func = FFA_SUCCESS_32};
 	struct vcpu *current = current_locked.vcpu;
diff --git a/src/ffa/spmc/direct_messaging.c b/src/ffa/spmc/direct_messaging.c
index a06594f..f76987b 100644
--- a/src/ffa/spmc/direct_messaging.c
+++ b/src/ffa/spmc/direct_messaging.c
@@ -11,9 +11,9 @@
 #include "hf/ffa.h"
 #include "hf/vm.h"
 
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
-				      ffa_id_t sender_vm_id,
-				      ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+					    ffa_id_t sender_vm_id,
+					    ffa_id_t receiver_vm_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -36,8 +36,9 @@
  * sender supports sending direct messaging requests, in accordance to their
  * respective configurations at the partition's FF-A manifest.
  */
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
-					  struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+						struct vm *receiver_vm,
+						uint32_t func)
 {
 	uint16_t sender_method;
 	uint16_t receiver_method;
@@ -92,9 +93,9 @@
 }
 
 /** Check validity of a FF-A direct message response. */
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
-				       ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+					     ffa_id_t sender_vm_id,
+					     ffa_id_t receiver_vm_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -107,9 +108,9 @@
 	       vm_id_is_current_world(sender_vm_id);
 }
 
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
-				     struct ffa_value args,
-				     struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+					   struct ffa_value args,
+					   struct ffa_value *ret)
 {
 	/*
 	 * SPs are not supposed to issue requests to VMs.
diff --git a/src/ffa/spmc/ffa_memory.c b/src/ffa/spmc/ffa_memory.c
index 1532727..1043569 100644
--- a/src/ffa/spmc/ffa_memory.c
+++ b/src/ffa/spmc/ffa_memory.c
@@ -16,14 +16,14 @@
 
 #include "sysregs.h"
 
-enum ffa_memory_handle_allocator plat_ffa_memory_handle_allocator(void)
+enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
 {
 	return FFA_MEMORY_HANDLE_ALLOCATOR_SPMC;
 }
 
 /** Check validity of the FF-A memory send function attempt. */
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
-				   uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+			      uint32_t share_func, bool multiple_borrower)
 {
 	const bool is_receiver_sp = vm_id_is_current_world(receiver);
 	const bool is_sender_sp = vm_id_is_current_world(sender);
@@ -62,24 +62,24 @@
 	}
 }
 
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
 {
 	return MM_MODE_NS;
 }
 
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
 {
 	/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
 	return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
 }
 
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
 {
 	/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
 	return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
 }
 
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
 	struct vm *from, uint32_t share_func,
 	struct ffa_memory_region **memory_region, uint32_t length,
 	uint32_t fragment_length, struct mpool *page_pool)
@@ -104,7 +104,7 @@
  * SPMC handles its memory share requests internally, so no forwarding of the
  * request is required.
  */
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
 	struct vm *to, ffa_memory_handle_t handle,
 	ffa_memory_region_flags_t flags, struct mpool *page_pool)
 {
@@ -117,7 +117,7 @@
 	return ffa_error(FFA_INVALID_PARAMETERS);
 }
 
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
 	struct vm *from, void *fragment, uint32_t fragment_length,
 	ffa_memory_handle_t handle, struct mpool *page_pool)
 {
@@ -134,7 +134,7 @@
  * Update the memory region attributes with the security state bit based on the
  * supplied mode.
  */
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
 	ffa_memory_attributes_t attributes, uint32_t mode)
 {
 	ffa_memory_attributes_t ret = attributes;
diff --git a/src/ffa/spmc/indirect_messaging.c b/src/ffa/spmc/indirect_messaging.c
index 5012424..0794b21 100644
--- a/src/ffa/spmc/indirect_messaging.c
+++ b/src/ffa/spmc/indirect_messaging.c
@@ -15,8 +15,8 @@
  * to their configurations in the respective partition's FF-A manifest.
  * Note: check is done at virtual FF-A instance only.
  */
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
-					struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+				   struct vm_locked receiver_locked)
 {
 	struct vm *sender_vm = sender_locked.vm;
 	struct vm *receiver_vm = receiver_locked.vm;
@@ -60,8 +60,9 @@
 
 	return true;
 }
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
-				struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+				    ffa_id_t sender_vm_id,
+				    struct ffa_value *ret)
 {
 	/* SPMC never needs to forward a FFA_MSG_SEND2, it always handles it. */
 	(void)receiver_vm_id;
diff --git a/src/ffa/spmc/interrupts.c b/src/ffa/spmc/interrupts.c
index 729c5cc..e06d20a 100644
--- a/src/ffa/spmc/interrupts.c
+++ b/src/ffa/spmc/interrupts.c
@@ -23,8 +23,8 @@
  *
  * Returns 0 on success, or -1 otherwise.
  */
-int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
-				      struct vcpu *current)
+int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
+				  struct vcpu *current)
 {
 	struct vcpu_locked current_locked;
 	uint32_t int_id;
@@ -478,7 +478,8 @@
  * execution is trapped into EL3. SPMD then routes the interrupt to SPMC
  * through FFA_INTERRUPT_32 ABI synchronously using eret conduit.
  */
-void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next)
+void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+					    struct vcpu **next)
 {
 	struct vcpu *target_vcpu;
 	struct vcpu_locked target_vcpu_locked =
@@ -552,7 +553,7 @@
 				 memory_order_relaxed)) {
 		/* Clear fields corresponding to secure interrupt handling. */
 		vcpu_secure_interrupt_complete(target_vcpu_locked);
-		plat_ffa_disable_vm_interrupts(target_vm_locked);
+		ffa_vm_disable_interrupts(target_vm_locked);
 
 		/* Resume current vCPU. */
 		*next = NULL;
@@ -600,7 +601,7 @@
 	vm_unlock(&target_vm_locked);
 }
 
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
 	struct vcpu_locked target_locked, struct vcpu_locked current_locked,
 	struct vm_locked receiver_locked)
 {
@@ -629,7 +630,7 @@
 	return ret;
 }
 
-struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current_vcpu)
+struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
 {
 	struct vcpu *next;
 	struct two_vcpu_locked both_vcpu_locked;
@@ -702,7 +703,7 @@
 	interrupts = &current->interrupts;
 	vm = current->vm;
 
-	if (plat_ffa_vm_managed_exit_supported(vm)) {
+	if (ffa_vm_managed_exit_supported(vm)) {
 		vcpu_virt_interrupt_set_enabled(interrupts,
 						HF_MANAGED_EXIT_INTID);
 		/*
@@ -734,8 +735,8 @@
  * interface early during the boot stage as an S-EL0 SP need not call
  * HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
  */
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
-					struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+					      struct vm_locked vm_locked)
 {
 	struct vcpu *current;
 	struct interrupts *interrupts;
@@ -773,8 +774,8 @@
  * - Change the security state of the interrupt.
  * - Enable or disable the physical interrupt.
  */
-int64_t plat_ffa_interrupt_reconfigure(uint32_t int_id, uint32_t command,
-				       uint32_t value, struct vcpu *current)
+int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
+				   uint32_t value, struct vcpu *current)
 {
 	struct vm *vm = current->vm;
 	struct vm_locked vm_locked;
@@ -862,7 +863,7 @@
 }
 
 /* Returns the virtual interrupt id to be handled by SP. */
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
 {
 	uint32_t int_id;
 
diff --git a/src/ffa/spmc/notifications.c b/src/ffa/spmc/notifications.c
index 9d6b3d7..d27c76b 100644
--- a/src/ffa/spmc/notifications.c
+++ b/src/ffa/spmc/notifications.c
@@ -24,8 +24,8 @@
 /** Interrupt priority for the Schedule Receiver Interrupt. */
 #define SRI_PRIORITY UINT32_C(0xf0)
 
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
-	struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+							  ffa_id_t vm_id)
 {
 	/**
 	 * Create/Destroy interfaces to be called by the hypervisor, into the
@@ -52,9 +52,8 @@
  * - If bind call from NWd, current VM ID must be same as Hypervisor ID,
  * receiver's ID must be from NWd, and sender's ID from SWd.
  */
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
-					  ffa_id_t sender_id,
-					  ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+				     ffa_id_t receiver_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -100,7 +99,7 @@
 	return true;
 }
 
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
 	ffa_id_t receiver_id, ffa_id_t sender_id,
 	ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
 	bool is_bind, struct ffa_value *ret)
@@ -123,9 +122,8 @@
  * - If set call from NWd, current VM ID must be same as Hypervisor ID,
  * and receiver must be an SP.
  */
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
-					ffa_id_t sender_id,
-					ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+				    ffa_id_t receiver_id)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 
@@ -170,11 +168,11 @@
 	return true;
 }
 
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
-				       ffa_id_t receiver_vm_id,
-				       ffa_notification_flags_t flags,
-				       ffa_notifications_bitmap_t bitmap,
-				       struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+				   ffa_id_t receiver_vm_id,
+				   ffa_notification_flags_t flags,
+				   ffa_notifications_bitmap_t bitmap,
+				   struct ffa_value *ret)
 {
 	(void)sender_vm_id;
 	(void)receiver_vm_id;
@@ -185,9 +183,8 @@
 	return false;
 }
 
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
-					ffa_id_t receiver_id,
-					ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+				    ffa_notification_flags_t flags)
 {
 	ffa_id_t current_vm_id = current->vm->id;
 	/*
@@ -214,8 +211,8 @@
 	return caller_and_receiver_valid && flags_valid;
 }
 
-void plat_ffa_notification_info_get_forward(  // NOLINTNEXTLINE
-	uint16_t *ids, uint32_t *ids_count,   // NOLINTNEXTLINE
+void ffa_notifications_info_get_forward(     // NOLINTNEXTLINE
+	uint16_t *ids, uint32_t *ids_count,  // NOLINTNEXTLINE
 	uint32_t *lists_sizes, uint32_t *lists_count,
 	const uint32_t ids_count_max)
 {
@@ -226,8 +223,8 @@
 	(void)ids_count_max;
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_create(
-	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+						 ffa_vcpu_count_t vcpu_count)
 {
 	struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
 	struct vm_locked vm_locked;
@@ -254,7 +251,7 @@
 		vm_locked.vm->notifications.enabled = true;
 	} else {
 		/* Else should regard with NWd VM ID. */
-		vm_locked = plat_ffa_nwd_vm_create(vm_id);
+		vm_locked = ffa_vm_nwd_create(vm_id);
 
 		/* If received NULL, there are no slots for VM creation. */
 		if (vm_locked.vm == NULL) {
@@ -279,8 +276,8 @@
 	return ret;
 }
 
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
-					       ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+					  ffa_vcpu_count_t vcpu_count)
 {
 	(void)vm_id;
 	(void)vcpu_count;
@@ -288,10 +285,10 @@
 	return true;
 }
 
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
 {
 	struct ffa_value ret = {.func = FFA_SUCCESS_32};
-	struct vm_locked to_destroy_locked = plat_ffa_vm_find_locked(vm_id);
+	struct vm_locked to_destroy_locked = ffa_vm_find_locked(vm_id);
 
 	if (to_destroy_locked.vm == NULL) {
 		dlog_verbose("Bitmap not created for VM: %u\n", vm_id);
@@ -315,7 +312,7 @@
 	vm_notifications_init(to_destroy_locked.vm,
 			      to_destroy_locked.vm->vcpu_count, NULL);
 	if (vm_id != HF_OTHER_WORLD_ID) {
-		plat_ffa_vm_destroy(to_destroy_locked);
+		ffa_vm_destroy(to_destroy_locked);
 	}
 
 out:
@@ -324,7 +321,7 @@
 	return ret;
 }
 
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
 	struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
 	ffa_notifications_bitmap_t *from_sp)
 {
@@ -334,7 +331,7 @@
 	return (struct ffa_value){.func = FFA_SUCCESS_32};
 }
 
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
 	struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
 	ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
 {
@@ -363,15 +360,16 @@
 	plat_interrupts_send_sgi(HF_SCHEDULE_RECEIVER_INTID, cpu, false);
 }
 
-static void plat_ffa_sri_set_delayed_internal(struct cpu *cpu, bool delayed)
+static void ffa_notifications_sri_set_delayed_internal(struct cpu *cpu,
+						       bool delayed)
 {
 	assert(cpu != NULL);
 	cpu->is_sri_delayed = delayed;
 }
 
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
 {
-	plat_ffa_sri_set_delayed_internal(cpu, true);
+	ffa_notifications_sri_set_delayed_internal(cpu, true);
 }
 
 static bool plat_ffa_is_sri_delayed(struct cpu *cpu)
@@ -380,27 +378,27 @@
 	return cpu->is_sri_delayed;
 }
 
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
 {
 	assert(cpu != NULL);
 
 	if (plat_ffa_is_sri_delayed(cpu)) {
 		plat_ffa_send_schedule_receiver_interrupt(cpu);
-		plat_ffa_sri_set_delayed_internal(cpu, false);
+		ffa_notifications_sri_set_delayed_internal(cpu, false);
 	}
 }
 
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
 {
 	/*
 	 * If flag to delay SRI isn't set, trigger SRI such that the
 	 * receiver scheduler is aware there are pending notifications.
 	 */
 	plat_ffa_send_schedule_receiver_interrupt(cpu);
-	plat_ffa_sri_set_delayed_internal(cpu, false);
+	ffa_notifications_sri_set_delayed_internal(cpu, false);
 }
 
-void plat_ffa_sri_init(struct cpu *cpu)
+void ffa_notifications_sri_init(struct cpu *cpu)
 {
 	/* Configure as Non Secure SGI. */
 	struct interrupt_descriptor sri_desc = {
diff --git a/src/ffa/spmc/power_management.c b/src/ffa/spmc/power_management.c
deleted file mode 100644
index f543059..0000000
--- a/src/ffa/spmc/power_management.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2024 The Hafnium Authors.
- *
- * Use of this source code is governed by a BSD-style
- * license that can be found in the LICENSE file or at
- * https://opensource.org/licenses/BSD-3-Clause.
- */
-
-#include "hf/ffa/power_management.h"
-
-/**
- * Returns FFA_SUCCESS as FFA_SECONDARY_EP_REGISTER is supported at the
- * secure virtual FF-A instance.
- */
-bool plat_ffa_is_secondary_ep_register_supported(void)
-{
-	return true;
-}
diff --git a/src/ffa/spmc/setup_and_discovery.c b/src/ffa/spmc/setup_and_discovery.c
index 3b22714..f0ad3de 100644
--- a/src/ffa/spmc/setup_and_discovery.c
+++ b/src/ffa/spmc/setup_and_discovery.c
@@ -8,13 +8,13 @@
 
 #include "hf/ffa/setup_and_discovery.h"
 
-#include "hf/ffa.h"
+#include "hf/check.h"
 #include "hf/manifest.h"
 #include "hf/vm.h"
 
 #include "smc.h"
 
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
 {
 	/*
 	 * Since we are running in the SPMC use FFA_ID_GET to fetch our
@@ -23,17 +23,26 @@
 	return smc_ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
 }
 
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+/**
+ * Returns FFA_SUCCESS as FFA_SECONDARY_EP_REGISTER is supported at the
+ * secure virtual FF-A instance.
+ */
+bool ffa_setup_is_secondary_ep_register_supported(void)
+{
+	return true;
+}
+
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
 {
 	(void)vm_locked;
 }
 
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
 {
 	/*
 	 * Allow forwarding from the SPMC to SPMD unconditionally.
@@ -42,7 +51,7 @@
 }
 
 /** Forward helper for FFA_PARTITION_INFO_GET. */
-ffa_vm_count_t plat_ffa_partition_info_get_forward(
+ffa_vm_count_t ffa_setup_partition_info_get_forward(
 	const struct ffa_uuid *uuid, uint32_t flags,
 	struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
 {
@@ -55,12 +64,12 @@
 	return vm_count;
 }
 
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
-				       paddr_t fdt_addr,
-				       size_t fdt_allocated_size,
-				       const struct manifest_vm *manifest_vm,
-				       const struct boot_params *boot_params,
-				       struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+					paddr_t fdt_addr,
+					size_t fdt_allocated_size,
+					const struct manifest_vm *manifest_vm,
+					const struct boot_params *boot_params,
+					struct mpool *ppool)
 {
 	(void)boot_params;
 	(void)stage1_locked;
@@ -72,7 +81,7 @@
 	CHECK(false);
 }
 
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
 	ffa_id_t caller_id, const struct vm *target)
 {
 	ffa_partition_properties_t result = target->messaging_method;
@@ -102,8 +111,8 @@
 	return result & final_mask;
 }
 
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
-				 struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+				  struct ffa_value *ret)
 {
 	(void)vm_locked;
 	(void)ret;
@@ -111,8 +120,8 @@
 	return false;
 }
 
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
-				  struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+				   struct ffa_value *ret)
 {
 	(void)to_locked;
 	(void)ret;
diff --git a/src/ffa/spmc/vm.c b/src/ffa/spmc/vm.c
index f999248..5ad2820 100644
--- a/src/ffa/spmc/vm.c
+++ b/src/ffa/spmc/vm.c
@@ -73,7 +73,7 @@
  * If a VM with the ID already exists return it.
  * Return NULL if it can't allocate a new VM.
  */
-struct vm_locked plat_ffa_nwd_vm_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id)
 {
 	struct vm_locked vm_locked;
 	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
@@ -106,7 +106,7 @@
 	return vm_locked;
 }
 
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
 {
 	struct vm *vm = to_destroy_locked.vm;
 	/*
@@ -121,7 +121,7 @@
 	}
 }
 
-void plat_ffa_vm_init(struct mpool *ppool)
+void ffa_vm_init(struct mpool *ppool)
 {
 	struct vm *other_world = vm_find(HF_OTHER_WORLD_ID);
 
@@ -144,12 +144,12 @@
 	}
 }
 
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
 {
 	return (vm->ns_interrupts_action == NS_ACTION_ME);
 }
 
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
 {
 	struct vm_locked to_ret_locked;
 
@@ -166,19 +166,18 @@
 	return to_ret_locked;
 }
 
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
 {
 	if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
 		return vm_find_locked(vm_id);
 	}
 
-	return plat_ffa_nwd_vm_create(vm_id);
+	return ffa_vm_nwd_create(vm_id);
 }
 
-bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
-					uint32_t *lists_sizes,
-					uint32_t *lists_count,
-					const uint32_t ids_count_max)
+bool ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
+				   uint32_t *lists_sizes, uint32_t *lists_count,
+				   const uint32_t ids_count_max)
 {
 	struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
 	struct vm_locked other_world_locked = vm_find_locked(HF_OTHER_WORLD_ID);
@@ -215,7 +214,7 @@
 	return list_full_and_more_pending;
 }
 
-void plat_ffa_disable_vm_interrupts(struct vm_locked vm_locked)
+void ffa_vm_disable_interrupts(struct vm_locked vm_locked)
 {
 	uint32_t core_pos = arch_find_core_pos();
 
@@ -237,10 +236,10 @@
 /**
  * Reclaim all resources belonging to VM in aborted state.
  */
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
 {
 	/*
 	 * Gracefully disable all interrupts belonging to SP.
 	 */
-	plat_ffa_disable_vm_interrupts(vm_locked);
+	ffa_vm_disable_interrupts(vm_locked);
 }
diff --git a/src/ffa/spmc/vm.h b/src/ffa/spmc/vm.h
index 383d191..0f24fa5 100644
--- a/src/ffa/spmc/vm.h
+++ b/src/ffa/spmc/vm.h
@@ -10,8 +10,8 @@
 
 #include "hf/vm.h"
 
-void plat_ffa_vm_init(struct mpool *ppool);
+void ffa_vm_init(struct mpool *ppool);
 
-struct vm_locked plat_ffa_nwd_vm_create(ffa_id_t vm_id);
+struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id);
 
-void plat_ffa_disable_vm_interrupts(struct vm_locked vm_locked);
+void ffa_vm_disable_interrupts(struct vm_locked vm_locked);