refactor: rename type for FF-A IDs

The FF-A ID is an unsigned 16-bit value that was
previously named ffa_vm_id_t. This patch renames it to
simply ffa_id_t, to make it clearer that the type is
used for endpoints other than just VMs.
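
The core of the rename is the typedef in
inc/vmapi/hf/ffa.h, as in the corresponding hunk below;
the remaining hunks update uses of the old name:

    /* Before */
    typedef uint16_t ffa_vm_id_t;
    /* After */
    typedef uint16_t ffa_id_t;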

Signed-off-by: J-Alves <joao.alves@arm.com>
Change-Id: I60319c08481b2380bd0063b108a35fc01e2af537
diff --git a/inc/hf/api.h b/inc/hf/api.h
index 55d515b..6bfd971 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -19,10 +19,10 @@
 struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current);
 void api_regs_state_saved(struct vcpu *vcpu);
 int64_t api_mailbox_writable_get(const struct vcpu *current);
-int64_t api_mailbox_waiter_get(ffa_vm_id_t vm_id, const struct vcpu *current);
+int64_t api_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current);
 struct vcpu *api_switch_to_vm(struct vcpu_locked current_locked,
 			      struct ffa_value to_ret,
-			      enum vcpu_state vcpu_state, ffa_vm_id_t to_id);
+			      enum vcpu_state vcpu_state, ffa_id_t to_id);
 struct vcpu *api_switch_to_primary(struct vcpu_locked current_locked,
 				   struct ffa_value primary_ret,
 				   enum vcpu_state secondary_state);
@@ -36,7 +36,7 @@
 int64_t api_interrupt_enable(uint32_t intid, bool enable,
 			     enum interrupt_type type, struct vcpu *current);
 uint32_t api_interrupt_get(struct vcpu *current);
-int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
+int64_t api_interrupt_inject(ffa_id_t target_vm_id,
 			     ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
 			     struct vcpu *current, struct vcpu **next);
 int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
@@ -45,22 +45,20 @@
 				    struct vcpu **next);
 void api_sri_send_if_delayed(struct vcpu *current);
 
-struct ffa_value api_ffa_msg_send(ffa_vm_id_t sender_vm_id,
-				  ffa_vm_id_t receiver_vm_id, uint32_t size,
+struct ffa_value api_ffa_msg_send(ffa_id_t sender_vm_id,
+				  ffa_id_t receiver_vm_id, uint32_t size,
 				  struct vcpu *current, struct vcpu **next);
-struct ffa_value api_ffa_msg_send2(ffa_vm_id_t sender_vm_id, uint32_t flags,
+struct ffa_value api_ffa_msg_send2(ffa_id_t sender_vm_id, uint32_t flags,
 				   struct vcpu *current);
-struct ffa_value api_ffa_rx_release(ffa_vm_id_t receiver_id,
-				    struct vcpu *current);
-struct ffa_value api_ffa_rx_acquire(ffa_vm_id_t receiver_id,
-				    struct vcpu *current);
+struct ffa_value api_ffa_rx_release(ffa_id_t receiver_id, struct vcpu *current);
+struct ffa_value api_ffa_rx_acquire(ffa_id_t receiver_id, struct vcpu *current);
 struct ffa_value api_vm_configure_pages(
 	struct mm_stage1_locked mm_stage1_locked, struct vm_locked vm_locked,
 	ipaddr_t send, ipaddr_t recv, uint32_t page_count,
 	struct mpool *local_page_pool);
 struct ffa_value api_ffa_rxtx_map(ipaddr_t send, ipaddr_t recv,
 				  uint32_t page_count, struct vcpu *current);
-struct ffa_value api_ffa_rxtx_unmap(ffa_vm_id_t allocator_id,
+struct ffa_value api_ffa_rxtx_unmap(ffa_id_t allocator_id,
 				    struct vcpu *current);
 struct ffa_value api_yield(struct vcpu *current, struct vcpu **next,
 			   struct ffa_value *args);
@@ -84,7 +82,7 @@
 				  uint32_t ffa_version);
 struct ffa_value api_ffa_msg_wait(struct vcpu *current, struct vcpu **next,
 				  struct ffa_value *args);
-struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+struct ffa_value api_ffa_run(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
 			     struct vcpu *current, struct vcpu **next);
 struct ffa_value api_ffa_mem_send(uint32_t share_func, uint32_t length,
 				  uint32_t fragment_length, ipaddr_t address,
@@ -99,19 +97,19 @@
 				     struct vcpu *current);
 struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
 				     uint32_t fragment_offset,
-				     ffa_vm_id_t sender_vm_id,
+				     ffa_id_t sender_vm_id,
 				     struct vcpu *current);
 struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
 				     uint32_t fragment_length,
-				     ffa_vm_id_t sender_vm_id,
+				     ffa_id_t sender_vm_id,
 				     struct vcpu *current);
-struct ffa_value api_ffa_msg_send_direct_req(ffa_vm_id_t sender_vm_id,
-					     ffa_vm_id_t receiver_vm_id,
+struct ffa_value api_ffa_msg_send_direct_req(ffa_id_t sender_vm_id,
+					     ffa_id_t receiver_vm_id,
 					     struct ffa_value args,
 					     struct vcpu *current,
 					     struct vcpu **next);
-struct ffa_value api_ffa_msg_send_direct_resp(ffa_vm_id_t sender_vm_id,
-					      ffa_vm_id_t receiver_vm_id,
+struct ffa_value api_ffa_msg_send_direct_resp(ffa_id_t sender_vm_id,
+					      ffa_id_t receiver_vm_id,
 					      struct ffa_value args,
 					      struct vcpu *current,
 					      struct vcpu **next);
@@ -120,22 +118,22 @@
 struct vcpu *api_switch_to_other_world(struct vcpu_locked current_locked,
 				       struct ffa_value other_world_ret,
 				       enum vcpu_state vcpu_state);
-struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
+struct ffa_value api_ffa_notification_bitmap_create(ffa_id_t vm_id,
 						    ffa_vcpu_count_t vcpu_count,
 						    struct vcpu *current);
-struct ffa_value api_ffa_notification_bitmap_destroy(ffa_vm_id_t vm_id,
+struct ffa_value api_ffa_notification_bitmap_destroy(ffa_id_t vm_id,
 						     struct vcpu *current);
 
 struct ffa_value api_ffa_notification_update_bindings(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
 	ffa_notifications_bitmap_t notifications, bool is_bind,
 	struct vcpu *current);
 
 struct ffa_value api_ffa_notification_set(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
 	ffa_notifications_bitmap_t notifications, struct vcpu *current);
 
-struct ffa_value api_ffa_notification_get(ffa_vm_id_t receiver_vm_id,
+struct ffa_value api_ffa_notification_get(ffa_id_t receiver_vm_id,
 					  uint16_t vcpu_id, uint32_t flags,
 					  struct vcpu *current);
 
@@ -150,6 +148,6 @@
 
 void api_ffa_resume_direct_resp_target(struct vcpu_locked current_locked,
 				       struct vcpu **next,
-				       ffa_vm_id_t receiver_vm_id,
+				       ffa_id_t receiver_vm_id,
 				       struct ffa_value to_ret,
 				       bool is_nwd_call_chain);
diff --git a/inc/hf/arch/ffa.h b/inc/hf/arch/ffa.h
index d36a4be..ca17b99 100644
--- a/inc/hf/arch/ffa.h
+++ b/inc/hf/arch/ffa.h
@@ -14,7 +14,7 @@
 struct ffa_value arch_ffa_features(uint32_t function_id);
 
 /** Returns the SPMC ID. */
-ffa_vm_id_t arch_ffa_spmc_id_get(void);
+ffa_id_t arch_ffa_spmc_id_get(void);
 
 /** Called once at boot time to initialize the platform ffa module. */
 void arch_ffa_init(void);
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index 8cc3d7e..e126320 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -172,7 +172,7 @@
 /**
  * Return the arch specific mm mode for send/recv pages of given VM ID.
  */
-uint32_t arch_mm_extra_attributes_from_vm(ffa_vm_id_t id);
+uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id);
 
 /**
  * Execute any barriers or synchronization that is required
diff --git a/inc/hf/arch/plat/ffa.h b/inc/hf/arch/plat/ffa.h
index 7221dcd..8326e5d 100644
--- a/inc/hf/arch/plat/ffa.h
+++ b/inc/hf/arch/plat/ffa.h
@@ -59,18 +59,18 @@
 void plat_ffa_log_init(void);
 void plat_ffa_set_tee_enabled(bool tee_enabled);
 void plat_ffa_init(struct mpool *ppool);
-bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
+bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id,
 				   uint32_t share_func);
 
 bool plat_ffa_is_direct_request_valid(struct vcpu *current,
-				      ffa_vm_id_t sender_vm_id,
-				      ffa_vm_id_t receiver_vm_id);
+				      ffa_id_t sender_vm_id,
+				      ffa_id_t receiver_vm_id);
 bool plat_ffa_is_direct_response_valid(struct vcpu *current,
-				       ffa_vm_id_t sender_vm_id,
-				       ffa_vm_id_t receiver_vm_id);
+				       ffa_id_t sender_vm_id,
+				       ffa_id_t receiver_vm_id);
 bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
 					  struct vm *receiver_vm);
-bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
+bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
 				     struct ffa_value args,
 				     struct ffa_value *ret);
 
@@ -83,33 +83,30 @@
 bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
 					struct vm_locked receiver_locked);
 
-bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
-				ffa_vm_id_t sender_vm_id,
+bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
 				struct ffa_value *ret);
 
 bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
-					    ffa_vm_id_t vm_id);
+					    ffa_id_t vm_id);
 
 bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
-					  ffa_vm_id_t sender_id,
-					  ffa_vm_id_t receiver_id);
+					  ffa_id_t sender_id,
+					  ffa_id_t receiver_id);
 bool plat_ffa_notifications_update_bindings_forward(
-	ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
+	ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
 	ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret);
 
 bool plat_ffa_is_notification_set_valid(struct vcpu *current,
-					ffa_vm_id_t sender_id,
-					ffa_vm_id_t receiver_id);
+					ffa_id_t sender_id,
+					ffa_id_t receiver_id);
 
-bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
-				       ffa_vm_id_t receiver_vm_id,
-				       uint32_t flags,
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+				       ffa_id_t receiver_vm_id, uint32_t flags,
 				       ffa_notifications_bitmap_t bitmap,
 				       struct ffa_value *ret);
 
 bool plat_ffa_is_notification_get_valid(struct vcpu *current,
-					ffa_vm_id_t receiver_id,
-					uint32_t flags);
+					ffa_id_t receiver_id, uint32_t flags);
 
 bool plat_ffa_notifications_get_from_sp(struct vm_locked receiver_locked,
 					ffa_vcpu_index_t vcpu_id,
@@ -155,55 +152,55 @@
  * to operate NS-memory. The function below returns the mode to use in the mm.h
  * library, depending on the memory ownder's id.
  */
-uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id);
+uint32_t plat_ffa_owner_world_mode(ffa_id_t owner_id);
 
 /**
  * Return the FF-A partition info VM/SP properties given the VM id.
  */
 ffa_partition_properties_t plat_ffa_partition_properties(
-	ffa_vm_id_t vm_id, const struct vm *target);
+	ffa_id_t vm_id, const struct vm *target);
 
 /**
  * Get NWd VM's structure.
  */
-struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id);
+struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id);
 
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id);
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id);
 
 /**
  * Creates a bitmap for the VM of the given ID.
  */
 struct ffa_value plat_ffa_notifications_bitmap_create(
-	ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count);
+	ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count);
 
 /**
  * Issues a FFA_NOTIFICATION_BITMAP_CREATE.
  * Returns true if the call goes well, and false if call returns with
  * FFA_ERROR_32.
  */
-bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
+bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
 					       ffa_vcpu_count_t vcpu_count);
 
 /**
  * Destroys the notifications bitmap for the given VM ID.
  */
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id);
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id);
 
 /**
  * Helper to get the struct notifications, depending on the sender's id.
  */
 struct notifications *plat_ffa_vm_get_notifications_senders_world(
-	struct vm_locked vm_locked, ffa_vm_id_t sender_id);
+	struct vm_locked vm_locked, ffa_id_t sender_id);
 
 /**
  * Helper to check if FF-A ID is a VM ID.
  */
-bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id);
+bool plat_ffa_is_vm_id(ffa_id_t vm_id);
 
 /**
  * Forward normal world calls of FFA_RUN ABI to other world.
  */
-bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
 			  struct ffa_value *ret);
 
 bool plat_ffa_notification_info_get_call(struct ffa_value *ret);
@@ -250,7 +247,7 @@
  * Check if current SP can resume target VM/SP using FFA_RUN ABI.
  */
 bool plat_ffa_run_checks(struct vcpu_locked current_locked,
-			 ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+			 ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
 			 struct ffa_value *run_ret, struct vcpu **next);
 
 /**
@@ -290,8 +287,8 @@
  * being performed.
  */
 bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
-					     ffa_vm_id_t vm_id,
-					     ffa_vm_id_t receiver_vm_id,
+					     ffa_id_t vm_id,
+					     ffa_id_t receiver_vm_id,
 					     struct vcpu_locked locked_vcpu,
 					     uint32_t func,
 					     enum vcpu_state *next_state);
@@ -302,7 +299,7 @@
 
 void plat_ffa_wind_call_chain_ffa_direct_req(
 	struct vcpu_locked current_locked,
-	struct vcpu_locked receiver_vcpu_locked, ffa_vm_id_t sender_vm_id);
+	struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id);
 
 void plat_ffa_unwind_call_chain_ffa_direct_resp(
 	struct vcpu_locked current_locked, struct vcpu_locked next_locked);
@@ -352,8 +349,8 @@
  * This FF-A v1.0 FFA_MSG_SEND interface.
  * Implemented for the Hypervisor, but not in the SPMC.
  */
-struct ffa_value plat_ffa_msg_send(ffa_vm_id_t sender_vm_id,
-				   ffa_vm_id_t receiver_vm_id, uint32_t size,
+struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
+				   ffa_id_t receiver_vm_id, uint32_t size,
 				   struct vcpu *current, struct vcpu **next);
 
 struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
@@ -371,7 +368,7 @@
 struct ffa_value plat_ffa_error_32(struct vcpu *current, struct vcpu **next,
 				   uint32_t error_code);
 
-bool plat_ffa_is_spmd_lp_id(ffa_vm_id_t vm_id);
+bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id);
 
 struct ffa_value plat_ffa_msg_recv(bool block,
 				   struct vcpu_locked current_locked,
@@ -379,5 +376,4 @@
 
 int64_t plat_ffa_mailbox_writable_get(const struct vcpu *current);
 
-int64_t plat_ffa_mailbox_waiter_get(ffa_vm_id_t vm_id,
-				    const struct vcpu *current);
+int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current);
diff --git a/inc/hf/dlog.h b/inc/hf/dlog.h
index 9147549..99b10de 100644
--- a/inc/hf/dlog.h
+++ b/inc/hf/dlog.h
@@ -74,4 +74,4 @@
 		}                                      \
 	} while (0)
 
-void dlog_flush_vm_buffer(ffa_vm_id_t id, char buffer[], size_t length);
+void dlog_flush_vm_buffer(ffa_id_t id, char buffer[], size_t length);
diff --git a/inc/hf/ffa_memory.h b/inc/hf/ffa_memory.h
index a8519a1..8187e6f 100644
--- a/inc/hf/ffa_memory.h
+++ b/inc/hf/ffa_memory.h
@@ -30,7 +30,7 @@
 struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
 					      ffa_memory_handle_t handle,
 					      uint32_t fragment_offset,
-					      ffa_vm_id_t sender_vm_id,
+					      ffa_id_t sender_vm_id,
 					      struct mpool *page_pool);
 struct ffa_value ffa_memory_relinquish(
 	struct vm_locked from_locked,
diff --git a/inc/hf/ffa_memory_internal.h b/inc/hf/ffa_memory_internal.h
index 27608ed..5f4a522 100644
--- a/inc/hf/ffa_memory_internal.h
+++ b/inc/hf/ffa_memory_internal.h
@@ -172,7 +172,7 @@
 	uint32_t *orig_from_mode_ret);
 struct ffa_value ffa_memory_send_continue_validate(
 	struct share_states_locked share_states, ffa_memory_handle_t handle,
-	struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
+	struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
 	struct mpool *page_pool);
 struct ffa_value ffa_retrieve_check_transition(
 	struct vm_locked to, uint32_t share_func,
@@ -180,13 +180,13 @@
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
 	uint32_t memory_to_attributes, uint32_t *to_mode);
 struct ffa_value ffa_retrieve_check_update(
-	struct vm_locked to_locked, ffa_vm_id_t from_id,
+	struct vm_locked to_locked, ffa_id_t from_id,
 	struct ffa_memory_region_constituent **fragments,
 	uint32_t *fragment_constituent_counts, uint32_t fragment_count,
 	uint32_t memory_to_attributes, uint32_t share_func, bool clear,
 	struct mpool *page_pool);
 uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
-					ffa_vm_id_t receiver);
+					ffa_id_t receiver);
 bool ffa_region_group_identity_map(
 	struct vm_locked vm_locked,
 	struct ffa_memory_region_constituent **fragments,
diff --git a/inc/hf/ffa_partition_manifest.h b/inc/hf/ffa_partition_manifest.h
index c360f45..c94dd93 100644
--- a/inc/hf/ffa_partition_manifest.h
+++ b/inc/hf/ffa_partition_manifest.h
@@ -125,9 +125,9 @@
 	/** UUID - mandatory */
 	struct ffa_uuid uuid;
 	/** Partition id - optional */
-	ffa_vm_id_t id;
+	ffa_id_t id;
 	/** Aux ids for mem transactions - optional */
-	ffa_vm_id_t aux_id;
+	ffa_id_t aux_id;
 
 	/* NOTE: optional name field maps to VM debug_name field */
 
diff --git a/inc/hf/vcpu.h b/inc/hf/vcpu.h
index 71ab200..e9387f2 100644
--- a/inc/hf/vcpu.h
+++ b/inc/hf/vcpu.h
@@ -133,7 +133,7 @@
 	 * The value HF_INVALID_VM_ID implies the vCPU is not executing as
 	 * a result of a prior FFA_MSG_SEND_DIRECT_REQ invocation.
 	 */
-	ffa_vm_id_t direct_request_origin_vm_id;
+	ffa_id_t direct_request_origin_vm_id;
 
 	/** Determine whether partition is currently handling managed exit. */
 	bool processing_managed_exit;
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 7297ea3..8898d39 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -98,7 +98,7 @@
 	const void *send;
 
 	/** The ID of the VM which sent the message currently in `recv`. */
-	ffa_vm_id_t recv_sender;
+	ffa_id_t recv_sender;
 
 	/** The size of the message currently in `recv`. */
 	uint32_t recv_size;
@@ -147,7 +147,7 @@
 	 * The index in the bindings array relates to the notification
 	 * ID, and bit position in 'ffa_notifications_bitmap_t'.
 	 */
-	ffa_vm_id_t bindings_sender_id[MAX_FFA_NOTIFICATIONS];
+	ffa_id_t bindings_sender_id[MAX_FFA_NOTIFICATIONS];
 	ffa_notifications_bitmap_t bindings_per_vcpu;
 
 	/* The index of the array below relates to the ID of the VCPU.
@@ -199,7 +199,7 @@
 };
 
 struct vm {
-	ffa_vm_id_t id;
+	ffa_id_t id;
 	struct ffa_uuid uuid;
 	uint32_t ffa_version;
 	struct smc_whitelist smc_whitelist;
@@ -293,21 +293,21 @@
 	struct vm_locked vm2;
 };
 
-struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
+struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
 		   struct mpool *ppool, bool el0_partition);
 bool vm_init_next(ffa_vcpu_count_t vcpu_count, struct mpool *ppool,
 		  struct vm **new_vm, bool el0_partition);
 ffa_vm_count_t vm_get_count(void);
-struct vm *vm_find(ffa_vm_id_t id);
-struct vm_locked vm_find_locked(ffa_vm_id_t id);
+struct vm *vm_find(ffa_id_t id);
+struct vm_locked vm_find_locked(ffa_id_t id);
 struct vm *vm_find_index(uint16_t index);
 struct vm_locked vm_lock(struct vm *vm);
 struct two_vm_locked vm_lock_both(struct vm *vm1, struct vm *vm2);
 void vm_unlock(struct vm_locked *locked);
 struct vcpu *vm_get_vcpu(struct vm *vm, ffa_vcpu_index_t vcpu_index);
-struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm);
-ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
-bool vm_id_is_current_world(ffa_vm_id_t vm_id);
+struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_id_t for_vm);
+ffa_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry);
+bool vm_id_is_current_world(ffa_id_t vm_id);
 bool vm_is_mailbox_busy(struct vm_locked to);
 bool vm_is_mailbox_other_world_owned(struct vm_locked to);
 bool vm_identity_map(struct vm_locked vm_locked, paddr_t begin, paddr_t end,
@@ -338,14 +338,14 @@
 					bool is_from_vm, bool is_per_vcpu,
 					ffa_notifications_bitmap_t notif);
 bool vm_notifications_validate_bound_sender(
-	struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
+	struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
 	ffa_notifications_bitmap_t notifications);
 bool vm_notifications_validate_binding(struct vm_locked vm_locked,
-				       bool is_from_vm, ffa_vm_id_t sender_id,
+				       bool is_from_vm, ffa_id_t sender_id,
 				       ffa_notifications_bitmap_t notifications,
 				       bool is_per_vcpu);
 void vm_notifications_update_bindings(struct vm_locked vm_locked,
-				      bool is_from_vm, ffa_vm_id_t sender_id,
+				      bool is_from_vm, ffa_id_t sender_id,
 				      ffa_notifications_bitmap_t notifications,
 				      bool is_per_vcpu);
 void vm_notifications_partition_set_pending(
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index c391744..98d1b3f 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -105,7 +105,7 @@
 /**
  * Returns the VM's own ID.
  */
-static inline ffa_vm_id_t hf_vm_get_id(void)
+static inline ffa_id_t hf_vm_get_id(void)
 {
 	return ffa_id_get().arg2;
 }
@@ -113,7 +113,7 @@
 /**
  * Runs the given vCPU of the given VM.
  */
-static inline struct ffa_value ffa_run(ffa_vm_id_t vm_id,
+static inline struct ffa_value ffa_run(ffa_id_t vm_id,
 				       ffa_vcpu_index_t vcpu_idx)
 {
 	return ffa_call((struct ffa_value){.func = FFA_RUN_32,
@@ -195,8 +195,8 @@
  *  - BUSY: the message could not be delivered either because the mailbox
  *    was full or the target VM is not yet set up.
  */
-static inline struct ffa_value ffa_msg_send(ffa_vm_id_t sender_vm_id,
-					    ffa_vm_id_t target_vm_id,
+static inline struct ffa_value ffa_msg_send(ffa_id_t sender_vm_id,
+					    ffa_id_t target_vm_id,
 					    uint32_t size, uint32_t attributes)
 {
 	return ffa_call((struct ffa_value){
@@ -369,7 +369,7 @@
  * Returns -1 on failure or if there are no waiters; the VM id of the next
  * waiter otherwise.
  */
-static inline int64_t hf_mailbox_waiter_get(ffa_vm_id_t vm_id)
+static inline int64_t hf_mailbox_waiter_get(ffa_id_t vm_id)
 {
 	return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
 }
@@ -418,7 +418,7 @@
  *  - 1 if it was called by the primary VM and the primary VM now needs to wake
  *    up or kick the target vCPU.
  */
-static inline int64_t hf_interrupt_inject(ffa_vm_id_t target_vm_id,
+static inline int64_t hf_interrupt_inject(ffa_id_t target_vm_id,
 					  ffa_vcpu_index_t target_vcpu_idx,
 					  uint32_t intid)
 {
@@ -470,7 +470,7 @@
 }
 
 static inline struct ffa_value ffa_msg_send_direct_req(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t target_vm_id, uint32_t arg3,
+	ffa_id_t sender_vm_id, ffa_id_t target_vm_id, uint32_t arg3,
 	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
 {
 	return ffa_call((struct ffa_value){
@@ -485,7 +485,7 @@
 }
 
 static inline struct ffa_value ffa_msg_send_direct_resp(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t target_vm_id, uint32_t arg3,
+	ffa_id_t sender_vm_id, ffa_id_t target_vm_id, uint32_t arg3,
 	uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7)
 {
 	return ffa_call((struct ffa_value){
@@ -500,7 +500,7 @@
 }
 
 static inline struct ffa_value ffa_notification_bind(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
 	ffa_notifications_bitmap_t bitmap)
 {
 	return ffa_call((struct ffa_value){
@@ -513,7 +513,7 @@
 }
 
 static inline struct ffa_value ffa_notification_unbind(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id,
+	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id,
 	ffa_notifications_bitmap_t bitmap)
 {
 	return ffa_call((struct ffa_value){
@@ -525,7 +525,7 @@
 }
 
 static inline struct ffa_value ffa_notification_set(
-	ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+	ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
 	ffa_notifications_bitmap_t bitmap)
 {
 	return ffa_call((struct ffa_value){
@@ -537,7 +537,7 @@
 	});
 }
 
-static inline struct ffa_value ffa_notification_get(ffa_vm_id_t receiver_vm_id,
+static inline struct ffa_value ffa_notification_get(ffa_id_t receiver_vm_id,
 						    ffa_vcpu_index_t vcpu_id,
 						    uint32_t flags)
 {
diff --git a/inc/vmapi/hf/ffa.h b/inc/vmapi/hf/ffa.h
index ca965a3..26e777a 100644
--- a/inc/vmapi/hf/ffa.h
+++ b/inc/vmapi/hf/ffa.h
@@ -163,7 +163,7 @@
 #define FFA_PAGE_SIZE ((size_t)4096)
 
 /** The ID of a VM. These are assigned sequentially starting with an offset. */
-typedef uint16_t ffa_vm_id_t;
+typedef uint16_t ffa_id_t;
 
 /**
  * Partition message header as specified by table 6.2 from FF-A v1.1 EAC0
@@ -185,7 +185,7 @@
 #define FFA_RXTX_ALLOCATOR_SHIFT 16
 
 static inline void ffa_rxtx_header_init(
-	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t size,
+	ffa_id_t sender, ffa_id_t receiver, uint32_t size,
 	struct ffa_partition_rxtx_header *header)
 {
 	header->flags = 0;
@@ -196,16 +196,16 @@
 	header->size = size;
 }
 
-static inline ffa_vm_id_t ffa_rxtx_header_sender(
+static inline ffa_id_t ffa_rxtx_header_sender(
 	const struct ffa_partition_rxtx_header *h)
 {
-	return (ffa_vm_id_t)(h->sender_receiver >> FFA_RXTX_SENDER_SHIFT);
+	return (ffa_id_t)(h->sender_receiver >> FFA_RXTX_SENDER_SHIFT);
 }
 
-static inline ffa_vm_id_t ffa_rxtx_header_receiver(
+static inline ffa_id_t ffa_rxtx_header_receiver(
 	const struct ffa_partition_rxtx_header *h)
 {
-	return (ffa_vm_id_t)(h->sender_receiver);
+	return (ffa_id_t)(h->sender_receiver);
 }
 
 /* The maximum length possible for a single message. */
@@ -359,7 +359,7 @@
  * A count of VMs. This has the same range as the VM IDs but we give it a
  * different name to make the different semantics clear.
  */
-typedef ffa_vm_id_t ffa_vm_count_t;
+typedef ffa_id_t ffa_vm_count_t;
 
 /** The index of a vCPU within a particular VM. */
 typedef uint16_t ffa_vcpu_index_t;
@@ -406,12 +406,12 @@
 	return (int32_t)val.arg2;
 }
 
-static inline ffa_vm_id_t ffa_sender(struct ffa_value args)
+static inline ffa_id_t ffa_sender(struct ffa_value args)
 {
 	return (args.arg1 >> 16) & 0xffff;
 }
 
-static inline ffa_vm_id_t ffa_receiver(struct ffa_value args)
+static inline ffa_id_t ffa_receiver(struct ffa_value args)
 {
 	return args.arg1 & 0xffff;
 }
@@ -476,7 +476,7 @@
 				  .arg3 = (uint32_t)(handle >> 32)};
 }
 
-static inline ffa_vm_id_t ffa_vm_id(struct ffa_value args)
+static inline ffa_id_t ffa_vm_id(struct ffa_value args)
 {
 	return (args.arg1 >> 16) & 0xffff;
 }
@@ -486,13 +486,12 @@
 	return args.arg1 & 0xffff;
 }
 
-static inline uint64_t ffa_vm_vcpu(ffa_vm_id_t vm_id,
-				   ffa_vcpu_index_t vcpu_index)
+static inline uint64_t ffa_vm_vcpu(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_index)
 {
 	return ((uint32_t)vm_id << 16) | vcpu_index;
 }
 
-static inline ffa_vm_id_t ffa_frag_sender(struct ffa_value args)
+static inline ffa_id_t ffa_frag_sender(struct ffa_value args)
 {
 	return (args.arg4 >> 16) & 0xffff;
 }
@@ -573,7 +572,7 @@
  * in FF-A 1.1 EAC0 specification.
  */
 struct ffa_partition_info {
-	ffa_vm_id_t vm_id;
+	ffa_id_t vm_id;
 	ffa_vcpu_count_t vcpu_count;
 	ffa_partition_properties_t properties;
 	struct ffa_uuid uuid;
@@ -841,7 +840,7 @@
  */
 struct ffa_memory_region_attributes {
 	/** The ID of the VM to which the memory is being given or shared. */
-	ffa_vm_id_t receiver;
+	ffa_id_t receiver;
 	/**
 	 * The permissions with which the memory region should be mapped in the
 	 * receiver's page table.
@@ -917,7 +916,7 @@
 	 * The ID of the VM which originally sent the memory region, i.e. the
 	 * owner.
 	 */
-	ffa_vm_id_t sender;
+	ffa_id_t sender;
 	ffa_memory_attributes_t attributes;
 	/** Flags to control behaviour of the transaction. */
 	ffa_memory_region_flags_t flags;
@@ -959,7 +958,7 @@
 	ffa_memory_handle_t handle;
 	ffa_memory_region_flags_t flags;
 	uint32_t endpoint_count;
-	ffa_vm_id_t endpoints[];
+	ffa_id_t endpoints[];
 };
 
 /**
@@ -984,13 +983,13 @@
 static inline uint32_t ffa_mem_relinquish_init(
 	struct ffa_mem_relinquish *relinquish_request,
 	ffa_memory_handle_t handle, ffa_memory_region_flags_t flags,
-	ffa_vm_id_t sender)
+	ffa_id_t sender)
 {
 	relinquish_request->handle = handle;
 	relinquish_request->flags = flags;
 	relinquish_request->endpoint_count = 1;
 	relinquish_request->endpoints[0] = sender;
-	return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_vm_id_t);
+	return sizeof(struct ffa_mem_relinquish) + sizeof(ffa_id_t);
 }
 
 void ffa_copy_memory_region_constituents(
@@ -1003,7 +1002,7 @@
  * to the SPMC, in order to allow indirect messaging.
  */
 struct ffa_endpoint_rx_tx_descriptor {
-	ffa_vm_id_t endpoint_id;
+	ffa_id_t endpoint_id;
 	uint16_t reserved;
 
 	/*
@@ -1037,19 +1036,19 @@
 }
 
 void ffa_memory_region_init_header(struct ffa_memory_region *memory_region,
-				   ffa_vm_id_t sender,
+				   ffa_id_t sender,
 				   ffa_memory_attributes_t attributes,
 				   ffa_memory_region_flags_t flags,
 				   ffa_memory_handle_t handle, uint32_t tag,
 				   uint32_t receiver_count);
 void ffa_memory_access_init_permissions(
-	struct ffa_memory_access *receiver, ffa_vm_id_t receiver_id,
+	struct ffa_memory_access *receiver, ffa_id_t receiver_id,
 	enum ffa_data_access data_access,
 	enum ffa_instruction_access instruction_access,
 	ffa_memory_receiver_flags_t flags);
 uint32_t ffa_memory_region_init_single_receiver(
 	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
-	ffa_vm_id_t sender, ffa_vm_id_t receiver,
+	ffa_id_t sender, ffa_id_t receiver,
 	const struct ffa_memory_region_constituent constituents[],
 	uint32_t constituent_count, uint32_t tag,
 	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
@@ -1059,7 +1058,7 @@
 	uint32_t *total_length);
 uint32_t ffa_memory_region_init(
 	struct ffa_memory_region *memory_region, size_t memory_region_max_size,
-	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
+	ffa_id_t sender, struct ffa_memory_access receivers[],
 	uint32_t receiver_count,
 	const struct ffa_memory_region_constituent constituents[],
 	uint32_t constituent_count, uint32_t tag,
@@ -1069,25 +1068,25 @@
 	uint32_t *total_length);
 uint32_t ffa_memory_retrieve_request_init(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
-	ffa_vm_id_t sender, struct ffa_memory_access receivers[],
+	ffa_id_t sender, struct ffa_memory_access receivers[],
 	uint32_t receiver_count, uint32_t tag, ffa_memory_region_flags_t flags,
 	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
 	enum ffa_memory_shareability shareability);
 uint32_t ffa_memory_retrieve_request_init_single_receiver(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
-	ffa_vm_id_t sender, ffa_vm_id_t receiver, uint32_t tag,
+	ffa_id_t sender, ffa_id_t receiver, uint32_t tag,
 	ffa_memory_region_flags_t flags, enum ffa_data_access data_access,
 	enum ffa_instruction_access instruction_access,
 	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
 	enum ffa_memory_shareability shareability);
 uint32_t ffa_memory_lender_retrieve_request_init(
 	struct ffa_memory_region *memory_region, ffa_memory_handle_t handle,
-	ffa_vm_id_t sender);
+	ffa_id_t sender);
 uint32_t ffa_memory_fragment_init(
 	struct ffa_memory_region_constituent *fragment,
 	size_t fragment_max_size,
 	const struct ffa_memory_region_constituent constituents[],
 	uint32_t constituent_count, uint32_t *fragment_length);
 void ffa_endpoint_rx_tx_descriptor_init(
-	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_vm_id_t endpoint_id,
+	struct ffa_endpoint_rx_tx_descriptor *desc, ffa_id_t endpoint_id,
 	uint64_t rx_address, uint64_t tx_address);
diff --git a/inc/vmapi/hf/ffa_v1_0.h b/inc/vmapi/hf/ffa_v1_0.h
index 22824e4..980473f 100644
--- a/inc/vmapi/hf/ffa_v1_0.h
+++ b/inc/vmapi/hf/ffa_v1_0.h
@@ -25,7 +25,7 @@
  * in DEN0077A FF-A 1.0 REL specification.
  */
 struct ffa_partition_info_v1_0 {
-	ffa_vm_id_t vm_id;
+	ffa_id_t vm_id;
 	ffa_vcpu_count_t vcpu_count;
 	ffa_partition_properties_t properties;
 };
@@ -41,7 +41,7 @@
 	 * The ID of the VM which originally sent the memory region, i.e. the
 	 * owner.
 	 */
-	ffa_vm_id_t sender;
+	ffa_id_t sender;
 	uint8_t attributes;
 	/** Reserved field, must be 0. */
 	uint8_t reserved_0;
@@ -89,13 +89,13 @@
 }
 
 void ffa_memory_region_init_header_v1_0(
-	struct ffa_memory_region_v1_0 *memory_region, ffa_vm_id_t sender,
+	struct ffa_memory_region_v1_0 *memory_region, ffa_id_t sender,
 	ffa_memory_attributes_t attributes, ffa_memory_region_flags_t flags,
 	ffa_memory_handle_t handle, uint32_t tag, uint32_t receiver_count);
 
 uint32_t ffa_memory_region_init_v1_0(
 	struct ffa_memory_region_v1_0 *memory_region,
-	size_t memory_region_max_size, ffa_vm_id_t sender,
+	size_t memory_region_max_size, ffa_id_t sender,
 	struct ffa_memory_access receivers[], uint32_t receiver_count,
 	const struct ffa_memory_region_constituent constituents[],
 	uint32_t constituent_count, uint32_t tag,
@@ -106,7 +106,7 @@
 
 uint32_t ffa_memory_retrieve_request_init_v1_0(
 	struct ffa_memory_region_v1_0 *memory_region,
-	ffa_memory_handle_t handle, ffa_vm_id_t sender,
+	ffa_memory_handle_t handle, ffa_id_t sender,
 	struct ffa_memory_access receivers[], uint32_t receiver_count,
 	uint32_t tag, ffa_memory_region_flags_t flags,
 	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,