refactor: rename type for FF-A IDs
The FF-A ID is an unsigned 16-bit value that was named
ffa_vm_id_t. This patch renames it to simply ffa_id_t,
to make it clearer that the ID type is used for endpoints
other than just VMs.
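
For reference, a minimal sketch of the underlying type, assuming it is
defined in a shared FF-A types header (that header is not part of this
diff):

    /*
     * Sketch only; the defining header is not shown in this patch.
     * FF-A endpoint IDs (VMs, SPs, the SPMC, ...) are unsigned 16-bit
     * values, hence the endpoint-neutral name.
     */
    typedef uint16_t ffa_id_t;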
Signed-off-by: J-Alves <joao.alves@arm.com>
Change-Id: I60319c08481b2380bd0063b108a35fc01e2af537
diff --git a/src/api.c b/src/api.c
index 9494ffe..d511e8d 100644
--- a/src/api.c
+++ b/src/api.c
@@ -122,7 +122,7 @@
*/
struct vcpu *api_switch_to_vm(struct vcpu_locked current_locked,
struct ffa_value to_ret,
- enum vcpu_state vcpu_state, ffa_vm_id_t to_id)
+ enum vcpu_state vcpu_state, ffa_id_t to_id)
{
struct vm *to_vm = vm_find(to_id);
struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current_locked.vcpu);
@@ -498,7 +498,7 @@
static void api_ffa_fill_partitions_info_array(
struct ffa_partition_info *partitions, size_t partitions_len,
- const struct ffa_uuid *uuid, bool count_flag, ffa_vm_id_t vm_id,
+ const struct ffa_uuid *uuid, bool count_flag, ffa_id_t vm_id,
ffa_vm_count_t *vm_count_out)
{
ffa_vm_count_t vm_count = 0;
@@ -541,7 +541,7 @@
}
static inline void api_ffa_pack_vmid_count_props(
- uint64_t *xn, ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count,
+ uint64_t *xn, ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count,
ffa_partition_properties_t properties)
{
*xn = (uint64_t)vm_id;
@@ -1033,7 +1033,7 @@
*/
static bool api_release_mailbox(struct vm_locked vm_locked, int32_t *error_code)
{
- ffa_vm_id_t vm_id = vm_locked.vm->id;
+ ffa_id_t vm_id = vm_locked.vm->id;
int32_t error_code_to_ret = 0;
switch (vm_locked.vm->mailbox.state) {
@@ -1354,7 +1354,7 @@
return ret;
}
-struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+struct ffa_value api_ffa_run(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
struct vcpu *current, struct vcpu **next)
{
struct vm *vm;
@@ -1681,7 +1681,7 @@
static void api_get_rxtx_description(struct vm *current_vm, ipaddr_t *send,
ipaddr_t *recv, uint32_t *page_count,
- ffa_vm_id_t *owner_vm_id)
+ ffa_id_t *owner_vm_id)
{
/*
* If the message has been forwarded the effective addresses are in
@@ -1734,7 +1734,7 @@
struct vm_locked owner_vm_locked;
struct mm_stage1_locked mm_stage1_locked;
struct mpool local_page_pool;
- ffa_vm_id_t owner_vm_id;
+ ffa_id_t owner_vm_id;
/*
* Get the original buffer addresses and VM ID in case of forwarded
@@ -1790,12 +1790,11 @@
* behalf of the caller.
* - FFA_SUCCESS on success if no further action is needed.
*/
-struct ffa_value api_ffa_rxtx_unmap(ffa_vm_id_t allocator_id,
- struct vcpu *current)
+struct ffa_value api_ffa_rxtx_unmap(ffa_id_t allocator_id, struct vcpu *current)
{
struct vm *vm = current->vm;
struct vm_locked vm_locked;
- ffa_vm_id_t owner_vm_id;
+ ffa_id_t owner_vm_id;
struct mm_stage1_locked mm_stage1_locked;
paddr_t send_pa_begin;
paddr_t send_pa_end;
@@ -1886,19 +1885,19 @@
* Copies data from the sender's send buffer to the recipient's receive buffer
* and notifies the receiver.
*/
-struct ffa_value api_ffa_msg_send2(ffa_vm_id_t sender_vm_id, uint32_t flags,
+struct ffa_value api_ffa_msg_send2(ffa_id_t sender_vm_id, uint32_t flags,
struct vcpu *current)
{
struct vm *from = current->vm;
struct vm *to;
struct vm_locked to_locked;
- ffa_vm_id_t msg_sender_id;
+ ffa_id_t msg_sender_id;
struct vm_locked sender_locked;
const void *from_msg;
struct ffa_value ret;
struct ffa_partition_rxtx_header header;
- ffa_vm_id_t sender_id;
- ffa_vm_id_t receiver_id;
+ ffa_id_t sender_id;
+ ffa_id_t receiver_id;
uint32_t msg_size;
ffa_notifications_bitmap_t rx_buffer_full;
@@ -2060,14 +2059,13 @@
* needs to wake up or kick waiters. Waiters should be retrieved by calling
* hf_mailbox_waiter_get.
*/
-struct ffa_value api_ffa_rx_release(ffa_vm_id_t receiver_id,
- struct vcpu *current)
+struct ffa_value api_ffa_rx_release(ffa_id_t receiver_id, struct vcpu *current)
{
struct vm *current_vm = current->vm;
struct vm *vm;
struct vm_locked vm_locked;
- ffa_vm_id_t current_vm_id = current_vm->id;
- ffa_vm_id_t release_vm_id;
+ ffa_id_t current_vm_id = current_vm->id;
+ ffa_id_t release_vm_id;
struct ffa_value ret;
int32_t error_code;
@@ -2125,8 +2123,7 @@
* - FFA_INVALID_PARAMETERS: there is no buffer pair registered for the VM.
* - FFA_NOT_SUPPORTED: function not implemented at the FF-A instance.
*/
-struct ffa_value api_ffa_rx_acquire(ffa_vm_id_t receiver_id,
- struct vcpu *current)
+struct ffa_value api_ffa_rx_acquire(ffa_id_t receiver_id, struct vcpu *current)
{
struct vm_locked receiver_locked;
struct vm *receiver;
@@ -2291,7 +2288,7 @@
* - 1 if it was called by the primary VM and the primary VM now needs to wake
* up or kick the target vCPU.
*/
-int64_t api_interrupt_inject(ffa_vm_id_t target_vm_id,
+int64_t api_interrupt_inject(ffa_id_t target_vm_id,
ffa_vcpu_index_t target_vcpu_idx, uint32_t intid,
struct vcpu *current, struct vcpu **next)
{
@@ -2548,8 +2545,8 @@
/**
* Send an FF-A direct message request.
*/
-struct ffa_value api_ffa_msg_send_direct_req(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id,
+struct ffa_value api_ffa_msg_send_direct_req(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id,
struct ffa_value args,
struct vcpu *current,
struct vcpu **next)
@@ -2728,7 +2725,7 @@
*/
void api_ffa_resume_direct_resp_target(struct vcpu_locked current_locked,
struct vcpu **next,
- ffa_vm_id_t receiver_vm_id,
+ ffa_id_t receiver_vm_id,
struct ffa_value to_ret,
bool is_nwd_call_chain)
{
@@ -2759,8 +2756,8 @@
/**
* Send an FF-A direct message response.
*/
-struct ffa_value api_ffa_msg_send_direct_resp(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id,
+struct ffa_value api_ffa_msg_send_direct_resp(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id,
struct ffa_value args,
struct vcpu *current,
struct vcpu **next)
@@ -3226,9 +3223,8 @@
* forwarding if needed.
*/
for (uint32_t i = 0U; i < memory_region->receiver_count; i++) {
- ffa_vm_id_t receiver_id =
- memory_region->receivers[i]
- .receiver_permissions.receiver;
+ ffa_id_t receiver_id = memory_region->receivers[i]
+ .receiver_permissions.receiver;
to = vm_find(receiver_id);
if (vm_id_is_current_world(receiver_id) &&
@@ -3483,7 +3479,7 @@
*/
length = sizeof(struct ffa_mem_relinquish) +
((struct ffa_mem_relinquish *)from_msg)->endpoint_count *
- sizeof(ffa_vm_id_t);
+ sizeof(ffa_id_t);
/*
* Copy the relinquish descriptor to an internal buffer, so that the
* caller can't change it underneath us.
@@ -3499,7 +3495,7 @@
memcpy_s(relinquish_request, message_buffer_size, from_msg, length);
if (sizeof(struct ffa_mem_relinquish) +
- relinquish_request->endpoint_count * sizeof(ffa_vm_id_t) !=
+ relinquish_request->endpoint_count * sizeof(ffa_id_t) !=
length) {
dlog_verbose(
"Endpoint count changed while copying to internal "
@@ -3540,7 +3536,7 @@
struct ffa_value api_ffa_mem_frag_rx(ffa_memory_handle_t handle,
uint32_t fragment_offset,
- ffa_vm_id_t sender_vm_id,
+ ffa_id_t sender_vm_id,
struct vcpu *current)
{
struct vm *to = current->vm;
@@ -3577,7 +3573,7 @@
struct ffa_value api_ffa_mem_frag_tx(ffa_memory_handle_t handle,
uint32_t fragment_length,
- ffa_vm_id_t sender_vm_id,
+ ffa_id_t sender_vm_id,
struct vcpu *current)
{
struct vm *from = current->vm;
@@ -3708,7 +3704,7 @@
return (struct ffa_value){.func = FFA_SUCCESS_32};
}
-struct ffa_value api_ffa_notification_bitmap_create(ffa_vm_id_t vm_id,
+struct ffa_value api_ffa_notification_bitmap_create(ffa_id_t vm_id,
ffa_vcpu_count_t vcpu_count,
struct vcpu *current)
{
@@ -3721,7 +3717,7 @@
return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
}
-struct ffa_value api_ffa_notification_bitmap_destroy(ffa_vm_id_t vm_id,
+struct ffa_value api_ffa_notification_bitmap_destroy(ffa_id_t vm_id,
struct vcpu *current)
{
/*
@@ -3737,16 +3733,15 @@
}
struct ffa_value api_ffa_notification_update_bindings(
- ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+ ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
ffa_notifications_bitmap_t notifications, bool is_bind,
struct vcpu *current)
{
struct ffa_value ret = {.func = FFA_SUCCESS_32};
struct vm_locked receiver_locked;
const bool is_per_vcpu = (flags & FFA_NOTIFICATION_FLAG_PER_VCPU) != 0U;
- const ffa_vm_id_t id_to_update =
- is_bind ? sender_vm_id : HF_INVALID_VM_ID;
- const ffa_vm_id_t id_to_validate =
+ const ffa_id_t id_to_update = is_bind ? sender_vm_id : HF_INVALID_VM_ID;
+ const ffa_id_t id_to_validate =
is_bind ? HF_INVALID_VM_ID : sender_vm_id;
const uint32_t flags_mbz =
is_bind ? ~FFA_NOTIFICATIONS_FLAG_PER_VCPU : ~0U;
@@ -3831,7 +3826,7 @@
}
struct ffa_value api_ffa_notification_set(
- ffa_vm_id_t sender_vm_id, ffa_vm_id_t receiver_vm_id, uint32_t flags,
+ ffa_id_t sender_vm_id, ffa_id_t receiver_vm_id, uint32_t flags,
ffa_notifications_bitmap_t notifications, struct vcpu *current)
{
struct ffa_value ret;
@@ -3933,7 +3928,7 @@
};
}
-struct ffa_value api_ffa_notification_get(ffa_vm_id_t receiver_vm_id,
+struct ffa_value api_ffa_notification_get(ffa_id_t receiver_vm_id,
ffa_vcpu_index_t vcpu_id,
uint32_t flags, struct vcpu *current)
{
diff --git a/src/arch/aarch64/hftest/el0/mm.c b/src/arch/aarch64/hftest/el0/mm.c
index 6b4df50..c0c1fcc 100644
--- a/src/arch/aarch64/hftest/el0/mm.c
+++ b/src/arch/aarch64/hftest/el0/mm.c
@@ -26,7 +26,7 @@
{
}
-uint32_t arch_mm_extra_attributes_from_vm(ffa_vm_id_t id)
+uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
{
(void)id;
return 0;
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index d494a25..08b9cc4 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -78,7 +78,7 @@
void arch_regs_reset(struct vcpu *vcpu)
{
- ffa_vm_id_t vm_id = vcpu->vm->id;
+ ffa_id_t vm_id = vcpu->vm->id;
bool is_primary = vm_id == HF_PRIMARY_VM_ID;
cpu_id_t vcpu_id = is_primary ? vcpu->cpu->id : vcpu_index(vcpu);
diff --git a/src/arch/aarch64/hypervisor/debug_el1.c b/src/arch/aarch64/hypervisor/debug_el1.c
index 516704e..484c544 100644
--- a/src/arch/aarch64/hypervisor/debug_el1.c
+++ b/src/arch/aarch64/hypervisor/debug_el1.c
@@ -133,8 +133,7 @@
* Processes an access (msr, mrs) to an EL1 debug register.
* Returns true if the access was allowed and performed, false otherwise.
*/
-bool debug_el1_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id,
- uintreg_t esr)
+bool debug_el1_process_access(struct vcpu *vcpu, ffa_id_t vm_id, uintreg_t esr)
{
/*
* For now, debug registers are not supported by secondary VMs.
diff --git a/src/arch/aarch64/hypervisor/debug_el1.h b/src/arch/aarch64/hypervisor/debug_el1.h
index 6ba154b..ce0c3ae 100644
--- a/src/arch/aarch64/hypervisor/debug_el1.h
+++ b/src/arch/aarch64/hypervisor/debug_el1.h
@@ -16,5 +16,5 @@
bool debug_el1_is_register_access(uintreg_t esr_el2);
-bool debug_el1_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id,
+bool debug_el1_process_access(struct vcpu *vcpu, ffa_id_t vm_id,
uintreg_t esr_el2);
diff --git a/src/arch/aarch64/hypervisor/ffa.c b/src/arch/aarch64/hypervisor/ffa.c
index b74cabb..07187e1 100644
--- a/src/arch/aarch64/hypervisor/ffa.c
+++ b/src/arch/aarch64/hypervisor/ffa.c
@@ -15,7 +15,7 @@
#include "smc.h"
-static ffa_vm_id_t spmc_id = HF_INVALID_VM_ID;
+static ffa_id_t spmc_id = HF_INVALID_VM_ID;
/**
* Returns information for features with arch specific implementation.
@@ -28,7 +28,7 @@
/**
* Returns the SPMC ID returned from the SPMD.
*/
-ffa_vm_id_t arch_ffa_spmc_id_get(void)
+ffa_id_t arch_ffa_spmc_id_get(void)
{
return spmc_id;
}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 31ccc83..19b0f05 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -333,9 +333,9 @@
*/
static bool spmd_handler(struct ffa_value *args, struct vcpu *current)
{
- ffa_vm_id_t sender = ffa_sender(*args);
- ffa_vm_id_t receiver = ffa_receiver(*args);
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t sender = ffa_sender(*args);
+ ffa_id_t receiver = ffa_receiver(*args);
+ ffa_id_t current_vm_id = current->vm->id;
uint32_t fwk_msg = ffa_fwk_msg(*args);
uint8_t fwk_msg_func_id = fwk_msg & SPMD_FWK_MSG_FUNC_MASK;
@@ -665,12 +665,12 @@
return true;
case FFA_NOTIFICATION_BITMAP_CREATE_32:
*args = api_ffa_notification_bitmap_create(
- (ffa_vm_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
+ (ffa_id_t)args->arg1, (ffa_vcpu_count_t)args->arg2,
current);
return true;
case FFA_NOTIFICATION_BITMAP_DESTROY_32:
*args = api_ffa_notification_bitmap_destroy(
- (ffa_vm_id_t)args->arg1, current);
+ (ffa_id_t)args->arg1, current);
return true;
case FFA_NOTIFICATION_BIND_32:
*args = api_ffa_notification_update_bindings(
@@ -1390,7 +1390,7 @@
void handle_system_register_access(uintreg_t esr_el2)
{
struct vcpu *vcpu = current();
- ffa_vm_id_t vm_id = vcpu->vm->id;
+ ffa_id_t vm_id = vcpu->vm->id;
uintreg_t ec = GET_ESR_EC(esr_el2);
CHECK(ec == EC_MSR);
diff --git a/src/arch/aarch64/hypervisor/perfmon.c b/src/arch/aarch64/hypervisor/perfmon.c
index f13b035..d4ac184 100644
--- a/src/arch/aarch64/hypervisor/perfmon.c
+++ b/src/arch/aarch64/hypervisor/perfmon.c
@@ -149,7 +149,7 @@
* Processes an access (msr, mrs) to a performance monitor register.
* Returns true if the access was allowed and performed, false otherwise.
*/
-bool perfmon_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id, uintreg_t esr)
+bool perfmon_process_access(struct vcpu *vcpu, ffa_id_t vm_id, uintreg_t esr)
{
/*
* For now, performance monitor registers are not supported by secondary
@@ -223,7 +223,7 @@
/**
* Returns the value register PMCCFILTR_EL0 should have at initialization.
*/
-uintreg_t perfmon_get_pmccfiltr_el0_init_value(ffa_vm_id_t vm_id)
+uintreg_t perfmon_get_pmccfiltr_el0_init_value(ffa_id_t vm_id)
{
if (vm_id != HF_PRIMARY_VM_ID) {
/* Disable cycle counting for secondary VMs. */
diff --git a/src/arch/aarch64/hypervisor/perfmon.h b/src/arch/aarch64/hypervisor/perfmon.h
index 81669ba..c4934c6 100644
--- a/src/arch/aarch64/hypervisor/perfmon.h
+++ b/src/arch/aarch64/hypervisor/perfmon.h
@@ -66,7 +66,7 @@
bool perfmon_is_register_access(uintreg_t esr_el2);
-bool perfmon_process_access(struct vcpu *vcpu, ffa_vm_id_t vm_id,
+bool perfmon_process_access(struct vcpu *vcpu, ffa_id_t vm_id,
uintreg_t esr_el2);
-uintreg_t perfmon_get_pmccfiltr_el0_init_value(ffa_vm_id_t vm_id);
+uintreg_t perfmon_get_pmccfiltr_el0_init_value(ffa_id_t vm_id);
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 2f37fe1..d12dc12 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -984,7 +984,7 @@
/**
* Return the arch specific mm mode for send/recv pages of given VM ID.
*/
-uint32_t arch_mm_extra_attributes_from_vm(ffa_vm_id_t id)
+uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
{
return ((id & HF_VM_ID_WORLD_MASK) == HF_HYPERVISOR_VM_ID) ? MM_MODE_NS
: 0;
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index dfae5cb..140b39d 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -25,7 +25,7 @@
}
ffa_partition_properties_t plat_ffa_partition_properties(
- ffa_vm_id_t vm_id, const struct vm *target)
+ ffa_id_t vm_id, const struct vm *target)
{
(void)vm_id;
(void)target;
@@ -49,8 +49,7 @@
/**
* Check validity of the FF-A memory send function attempt.
*/
-bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
- uint32_t share_func)
+bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
{
(void)receiver_vm_id;
(void)share_func;
@@ -62,8 +61,8 @@
* Check validity of a FF-A direct message request.
*/
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -85,8 +84,8 @@
* Check validity of a FF-A direct message response.
*/
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -96,7 +95,7 @@
}
bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
- ffa_vm_id_t vm_id)
+ ffa_id_t vm_id)
{
(void)current;
(void)vm_id;
@@ -119,7 +118,7 @@
(void)vm_locked;
}
-bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
+bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)
{
@@ -157,8 +156,8 @@
return false;
}
-bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
- ffa_vm_id_t sender_vm_id, struct ffa_value *ret)
+bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
(void)receiver_vm_id;
(void)sender_vm_id;
@@ -185,15 +184,15 @@
return 0U;
}
-uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+uint32_t plat_ffa_owner_world_mode(ffa_id_t owner_id)
{
(void)owner_id;
return 0U;
}
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -202,7 +201,7 @@
}
bool plat_ffa_notifications_update_bindings_forward(
- ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
+ ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
{
(void)ret;
@@ -217,8 +216,8 @@
}
bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -227,7 +226,7 @@
}
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_vm_id_t receiver_id, uint32_t flags)
+ ffa_id_t receiver_id, uint32_t flags)
{
(void)flags;
(void)current;
@@ -260,9 +259,8 @@
return false;
}
-bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id,
- uint32_t flags,
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap,
struct ffa_value *ret)
{
@@ -276,7 +274,7 @@
}
struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+ ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -284,7 +282,7 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
+bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
@@ -293,32 +291,32 @@
return false;
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_vm_id(ffa_id_t vm_id)
{
(void)vm_id;
return false;
}
-bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *ret)
{
(void)vm_id;
@@ -359,7 +357,7 @@
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *run_ret, struct vcpu **next)
{
(void)current_locked;
@@ -465,8 +463,8 @@
}
bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_vm_id_t vm_id,
- ffa_vm_id_t receiver_vm_id,
+ ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id,
struct vcpu_locked receiver_locked,
uint32_t func, // NOLINTNEXTLINE
enum vcpu_state *next_state)
@@ -490,14 +488,14 @@
void plat_ffa_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
- struct vcpu_locked receiver_vcpu_locked, ffa_vm_id_t sender_vm_id)
+ struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
(void)current_locked;
(void)receiver_vcpu_locked;
(void)sender_vm_id;
}
-bool plat_ffa_is_spmd_lp_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
{
(void)vm_id;
return false;
@@ -590,8 +588,8 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_msg_send(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id, uint32_t size,
+struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
struct vcpu *current, struct vcpu **next)
{
(void)sender_vm_id;
@@ -641,8 +639,7 @@
return false;
}
-int64_t plat_ffa_mailbox_waiter_get(ffa_vm_id_t vm_id,
- const struct vcpu *current)
+int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current)
{
(void)vm_id;
(void)current;
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 878fed3..6c536b9 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -142,7 +142,7 @@
dlog_verbose("TEE finished setting up buffers.\n");
}
-bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *ret)
{
/*
@@ -160,8 +160,7 @@
/**
* Check validity of the FF-A memory send function attempt.
*/
-bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
- uint32_t share_func)
+bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
{
/*
* Currently memory interfaces are not forwarded from hypervisor to
@@ -178,10 +177,10 @@
* Check validity of a FF-A direct message request.
*/
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/*
* The primary VM can send direct message request to
@@ -197,7 +196,7 @@
* Check validity of a FF-A notifications bitmap create.
*/
bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
- ffa_vm_id_t vm_id)
+ ffa_id_t vm_id)
{
/*
* Call should only be used by the Hypervisor, so any attempt of
@@ -226,10 +225,10 @@
* Check validity of a FF-A direct message response.
*/
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/*
* Secondary VMs can send direct message responses to
@@ -240,7 +239,7 @@
receiver_vm_id == HF_PRIMARY_VM_ID;
}
-bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
+bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)
{
@@ -266,7 +265,7 @@
struct ffa_value *ret)
{
struct vm *vm = vm_locked.vm;
- ffa_vm_id_t vm_id = vm->id;
+ ffa_id_t vm_id = vm->id;
if (!ffa_tee_enabled || !vm_supports_indirect_messages(vm)) {
return false;
@@ -344,8 +343,8 @@
return true;
}
-bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
- ffa_vm_id_t sender_vm_id, struct ffa_value *ret)
+bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
/* FFA_MSG_SEND2 is forwarded to SPMC when the receiver is an SP. */
if (!vm_id_is_current_world(receiver_vm_id)) {
@@ -385,14 +384,14 @@
return 0U;
}
-uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+uint32_t plat_ffa_owner_world_mode(ffa_id_t owner_id)
{
(void)owner_id;
return plat_ffa_other_world_mode();
}
ffa_partition_properties_t plat_ffa_partition_properties(
- ffa_vm_id_t vm_id, const struct vm *target)
+ ffa_id_t vm_id, const struct vm *target)
{
ffa_partition_properties_t result = target->messaging_method;
/*
@@ -419,16 +418,16 @@
}
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/** If Hafnium is hypervisor, receiver needs to be current vm. */
return sender_id != receiver_id && current_vm_id == receiver_id;
}
bool plat_ffa_notifications_update_bindings_forward(
- ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
+ ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
{
CHECK(ret != NULL);
@@ -451,18 +450,17 @@
}
bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/* If Hafnium is hypervisor, sender needs to be current vm. */
return sender_id == current_vm_id && sender_id != receiver_id;
}
-bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id,
- uint32_t flags,
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap,
struct ffa_value *ret)
{
@@ -489,9 +487,9 @@
}
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_vm_id_t receiver_id, uint32_t flags)
+ ffa_id_t receiver_id, uint32_t flags)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
(void)flags;
@@ -500,7 +498,7 @@
}
struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+ ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -508,14 +506,14 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
+bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
ffa_vcpu_count_t vcpu_count)
{
struct ffa_value ret;
@@ -539,7 +537,7 @@
return true;
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
{
if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
return vm_find_locked(vm_id);
@@ -548,12 +546,12 @@
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
{
return plat_ffa_vm_find_locked(vm_id);
}
-bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_vm_id(ffa_id_t vm_id)
{
return vm_id_is_current_world(vm_id);
}
@@ -636,7 +634,7 @@
ffa_notifications_bitmap_t *from_sp,
struct ffa_value *ret)
{
- ffa_vm_id_t receiver_id = receiver_locked.vm->id;
+ ffa_id_t receiver_id = receiver_locked.vm->id;
assert(from_sp != NULL && ret != NULL);
@@ -659,7 +657,7 @@
struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
uint32_t flags, ffa_vcpu_index_t vcpu_id, struct ffa_value *ret)
{
- ffa_vm_id_t receiver_id = receiver_locked.vm->id;
+ ffa_id_t receiver_id = receiver_locked.vm->id;
ffa_notifications_bitmap_t spm_notifications = 0;
(void)flags;
@@ -745,7 +743,7 @@
{
struct ffa_value ret;
uint64_t func;
- ffa_vm_id_t id;
+ ffa_id_t id;
assert(vm_locked.vm != NULL);
@@ -791,7 +789,7 @@
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *run_ret, struct vcpu **next)
{
(void)next;
@@ -1093,8 +1091,8 @@
}
bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_vm_id_t vm_id,
- ffa_vm_id_t receiver_vm_id,
+ ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id,
struct vcpu_locked receiver_locked,
uint32_t func,
enum vcpu_state *next_state)
@@ -1133,7 +1131,7 @@
void plat_ffa_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
- struct vcpu_locked receiver_vcpu_locked, ffa_vm_id_t sender_vm_id)
+ struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
/* Calls chains not supported in the Hypervisor/VMs. */
(void)current_locked;
@@ -1141,7 +1139,7 @@
(void)sender_vm_id;
}
-bool plat_ffa_is_spmd_lp_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
{
(void)vm_id;
return false;
@@ -1198,7 +1196,7 @@
/** Forwards a memory send message on to the other world. */
static struct ffa_value memory_send_other_world_forward(
- struct vm_locked other_world_locked, ffa_vm_id_t sender_vm_id,
+ struct vm_locked other_world_locked, ffa_id_t sender_vm_id,
uint32_t share_func, struct ffa_memory_region *memory_region,
uint32_t memory_share_length, uint32_t fragment_length)
{
@@ -1422,7 +1420,7 @@
* Notifies the `to` VM about the message currently in its mailbox, possibly
* with the help of the primary VM.
*/
-static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
+static struct ffa_value deliver_msg(struct vm_locked to, ffa_id_t from_id,
struct vcpu_locked current_locked,
struct vcpu **next)
{
@@ -1929,7 +1927,7 @@
* Forwards a memory send continuation message on to the other world.
*/
static struct ffa_value memory_send_continue_other_world_forward(
- struct vm_locked other_world_locked, ffa_vm_id_t sender_vm_id,
+ struct vm_locked other_world_locked, ffa_id_t sender_vm_id,
void *fragment, uint32_t fragment_length, ffa_memory_handle_t handle)
{
struct ffa_value ret;
@@ -2179,8 +2177,8 @@
* If the recipient's receive buffer is busy, it can optionally register the
* caller to be notified when the recipient's receive buffer becomes available.
*/
-struct ffa_value plat_ffa_msg_send(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id, uint32_t size,
+struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
struct vcpu *current, struct vcpu **next)
{
struct vm *from = current->vm;
@@ -2373,8 +2371,7 @@
* Returns -1 on failure or if there are no waiters; the VM id of the next
* waiter otherwise.
*/
-int64_t plat_ffa_mailbox_waiter_get(ffa_vm_id_t vm_id,
- const struct vcpu *current)
+int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current)
{
struct vm *vm;
struct vm_locked locked;
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index f21a5e6..e0421a8 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -106,7 +106,7 @@
}
static struct vm_locked plat_ffa_nwd_vm_find_locked(
- struct nwd_vms_locked nwd_vms_locked, ffa_vm_id_t vm_id)
+ struct nwd_vms_locked nwd_vms_locked, ffa_id_t vm_id)
{
assert(nwd_vms_locked.nwd_vms != NULL);
@@ -124,7 +124,7 @@
* If a VM with the ID already exists return it.
* Return NULL if it can't allocate a new VM.
*/
-static struct vm_locked plat_ffa_nwd_vm_create(ffa_vm_id_t vm_id)
+static struct vm_locked plat_ffa_nwd_vm_create(ffa_id_t vm_id)
{
struct vm_locked vm_locked;
struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
@@ -239,7 +239,7 @@
plat_ffa_vm_init(ppool);
}
-bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *ret)
{
(void)vm_id;
@@ -252,8 +252,7 @@
/**
* Check validity of the FF-A memory send function attempt.
*/
-bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
- uint32_t share_func)
+bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
{
bool result = false;
@@ -354,7 +353,7 @@
*/
static bool plat_ffa_check_rtm_ffa_dir_req(struct vcpu_locked current_locked,
struct vcpu_locked locked_vcpu,
- ffa_vm_id_t receiver_vm_id,
+ ffa_id_t receiver_vm_id,
uint32_t func,
enum vcpu_state *next_state)
{
@@ -488,8 +487,8 @@
* by the Partition runtime model.
*/
bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_vm_id_t vm_id,
- ffa_vm_id_t receiver_vm_id,
+ ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id,
struct vcpu_locked locked_vcpu,
uint32_t func,
enum vcpu_state *next_state)
@@ -536,7 +535,7 @@
return allowed;
}
-bool plat_ffa_is_spmd_lp_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
{
return (vm_id >= EL3_SPMD_LP_ID_START && vm_id <= EL3_SPMD_LP_ID_END);
}
@@ -545,10 +544,10 @@
* Check validity of a FF-A direct message request.
*/
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/*
* The normal world can send direct message requests
@@ -592,10 +591,10 @@
* Check validity of a FF-A direct message response.
*/
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/*
* Direct message responses emitted from a SP target either the NWd,
@@ -606,7 +605,7 @@
vm_id_is_current_world(sender_vm_id);
}
-bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
+bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)
{
@@ -676,8 +675,8 @@
return true;
}
-bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
- ffa_vm_id_t sender_vm_id, struct ffa_value *ret)
+bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
/* SPMC never needs to forward a FFA_MSG_SEND2, it always handles it. */
(void)receiver_vm_id;
@@ -688,7 +687,7 @@
}
bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
- ffa_vm_id_t vm_id)
+ ffa_id_t vm_id)
{
/**
* Create/Destroy interfaces to be called by the hypervisor, into the
@@ -699,10 +698,10 @@
}
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/**
* SPMC:
@@ -722,7 +721,7 @@
}
bool plat_ffa_notifications_update_bindings_forward(
- ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
+ ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
{
(void)ret;
@@ -737,10 +736,10 @@
}
bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/*
* SPMC:
@@ -759,9 +758,8 @@
vm_id_is_current_world(receiver_id)));
}
-bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id,
- uint32_t flags,
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap,
struct ffa_value *ret)
{
@@ -785,9 +783,9 @@
}
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_vm_id_t receiver_id, uint32_t flags)
+ ffa_id_t receiver_id, uint32_t flags)
{
- ffa_vm_id_t current_vm_id = current->vm->id;
+ ffa_id_t current_vm_id = current->vm->id;
/*
* SPMC:
* - A get call cannot be targeted to an SPMD logical partition.
@@ -842,14 +840,14 @@
return MM_MODE_NS;
}
-uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+uint32_t plat_ffa_owner_world_mode(ffa_id_t owner_id)
{
return vm_id_is_current_world(owner_id) ? 0U
: plat_ffa_other_world_mode();
}
ffa_partition_properties_t plat_ffa_partition_properties(
- ffa_vm_id_t vm_id, const struct vm *target)
+ ffa_id_t vm_id, const struct vm *target)
{
ffa_partition_properties_t result = target->messaging_method;
/*
@@ -869,7 +867,7 @@
return (vm->ns_interrupts_action == NS_ACTION_ME);
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
{
struct vm_locked to_ret_locked;
@@ -886,7 +884,7 @@
return to_ret_locked;
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
{
if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
return vm_find_locked(vm_id);
@@ -896,7 +894,7 @@
}
struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+ ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
struct vm_locked vm_locked;
@@ -948,7 +946,7 @@
return ret;
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_vm_id_t vm_id,
+bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
@@ -957,7 +955,7 @@
return true;
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
struct ffa_value ret = {.func = FFA_SUCCESS_32};
struct vm_locked to_destroy_locked = plat_ffa_vm_find_locked(vm_id);
@@ -993,7 +991,7 @@
return ret;
}
-bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_vm_id(ffa_id_t vm_id)
{
return !vm_id_is_current_world(vm_id);
}
@@ -1106,7 +1104,7 @@
* Check if current VM can resume target VM using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *run_ret, struct vcpu **next)
{
/*
@@ -2108,7 +2106,7 @@
static struct ffa_value plat_ffa_resume_direct_response(
struct vcpu_locked current_locked, struct vcpu **next)
{
- ffa_vm_id_t receiver_vm_id;
+ ffa_id_t receiver_vm_id;
struct vcpu *current = current_locked.vcpu;
struct ffa_value to_ret;
@@ -2362,7 +2360,7 @@
*/
void plat_ffa_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
- struct vcpu_locked receiver_vcpu_locked, ffa_vm_id_t sender_vm_id)
+ struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
struct vcpu *current = current_locked.vcpu;
struct vcpu *receiver_vcpu = receiver_vcpu_locked.vcpu;
@@ -2400,7 +2398,7 @@
struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
struct vcpu *next = next_locked.vcpu;
- ffa_vm_id_t receiver_vm_id = next->vm->id;
+ ffa_id_t receiver_vm_id = next->vm->id;
struct vcpu *current = current_locked.vcpu;
assert(current->call_chain.next_node == NULL);
@@ -2611,8 +2609,8 @@
return ret;
}
-struct ffa_value plat_ffa_msg_send(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id, uint32_t size,
+struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
struct vcpu *current, struct vcpu **next)
{
(void)sender_vm_id;
@@ -2763,8 +2761,7 @@
return ret;
}
-int64_t plat_ffa_mailbox_waiter_get(ffa_vm_id_t vm_id,
- const struct vcpu *current)
+int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current)
{
(void)vm_id;
(void)current;
diff --git a/src/arch/aarch64/sysregs.c b/src/arch/aarch64/sysregs.c
index 9c18e5d..7fe6614 100644
--- a/src/arch/aarch64/sysregs.c
+++ b/src/arch/aarch64/sysregs.c
@@ -27,7 +27,7 @@
* Returns the value for HCR_EL2 for the particular VM.
* For now, the primary VM has one value and all secondary VMs share a value.
*/
-uintreg_t get_hcr_el2_value(ffa_vm_id_t vm_id, bool is_el0_partition)
+uintreg_t get_hcr_el2_value(ffa_id_t vm_id, bool is_el0_partition)
{
uintreg_t hcr_el2_value = 0;
diff --git a/src/arch/aarch64/sysregs.h b/src/arch/aarch64/sysregs.h
index 9b3fbbe..1ed66c8 100644
--- a/src/arch/aarch64/sysregs.h
+++ b/src/arch/aarch64/sysregs.h
@@ -15,7 +15,7 @@
#include "sysregs_defs.h"
/** HCR_EL2 */
-uintreg_t get_hcr_el2_value(ffa_vm_id_t vm_id, bool is_el0_partition);
+uintreg_t get_hcr_el2_value(ffa_id_t vm_id, bool is_el0_partition);
/** MDCR_EL2 */
uintreg_t get_mdcr_el2_value(void);
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 8374862..54f4203 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -20,7 +20,7 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-ffa_vm_id_t arch_ffa_spmc_id_get(void)
+ffa_id_t arch_ffa_spmc_id_get(void)
{
return HF_SPMC_VM_ID;
}
@@ -29,8 +29,7 @@
{
}
-bool plat_ffa_is_memory_send_valid(ffa_vm_id_t receiver_vm_id,
- uint32_t share_func)
+bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
{
(void)receiver_vm_id;
(void)share_func;
@@ -39,8 +38,8 @@
}
bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -59,8 +58,8 @@
}
bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id)
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -69,7 +68,7 @@
return true;
}
-bool plat_ffa_run_forward(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *ret)
{
(void)vm_id;
@@ -89,7 +88,7 @@
(void)vm_locked;
}
-bool plat_ffa_direct_request_forward(ffa_vm_id_t receiver_vm_id,
+bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
struct ffa_value args,
struct ffa_value *ret)
{
@@ -126,8 +125,8 @@
return false;
}
-bool plat_ffa_msg_send2_forward(ffa_vm_id_t receiver_vm_id,
- ffa_vm_id_t sender_vm_id, struct ffa_value *ret)
+bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
(void)receiver_vm_id;
(void)sender_vm_id;
@@ -153,15 +152,15 @@
return 0U;
}
-uint32_t plat_ffa_owner_world_mode(ffa_vm_id_t owner_id)
+uint32_t plat_ffa_owner_world_mode(ffa_id_t owner_id)
{
(void)owner_id;
return 0U;
}
bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -170,7 +169,7 @@
}
bool plat_ffa_notifications_update_bindings_forward(
- ffa_vm_id_t receiver_id, ffa_vm_id_t sender_id, uint32_t flags,
+ ffa_id_t receiver_id, ffa_id_t sender_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap, bool is_bind, struct ffa_value *ret)
{
(void)ret;
@@ -190,7 +189,7 @@
}
ffa_partition_properties_t plat_ffa_partition_properties(
- ffa_vm_id_t vm_id, const struct vm *target)
+ ffa_id_t vm_id, const struct vm *target)
{
(void)vm_id;
(void)target;
@@ -204,7 +203,7 @@
}
bool plat_ffa_is_notifications_create_valid(struct vcpu *current,
- ffa_vm_id_t vm_id)
+ ffa_id_t vm_id)
{
(void)current;
(void)vm_id;
@@ -213,8 +212,8 @@
}
bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_vm_id_t sender_id,
- ffa_vm_id_t receiver_id)
+ ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -223,7 +222,7 @@
}
bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_vm_id_t receiver_id, uint32_t flags)
+ ffa_id_t receiver_id, uint32_t flags)
{
(void)flags;
(void)current;
@@ -258,9 +257,8 @@
return false;
}
-bool plat_ffa_notification_set_forward(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id,
- uint32_t flags,
+bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
ffa_notifications_bitmap_t bitmap,
struct ffa_value *ret)
{
@@ -274,7 +272,7 @@
}
struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_vm_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+ ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -282,26 +280,26 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_vm_id_t vm_id)
+struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_vm_id_t vm_id)
+struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-bool plat_ffa_is_vm_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_vm_id(ffa_id_t vm_id)
{
(void)vm_id;
return false;
@@ -337,7 +335,7 @@
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
struct ffa_value *run_ret, struct vcpu **next)
{
(void)current_locked;
@@ -428,8 +426,8 @@
}
bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_vm_id_t vm_id,
- ffa_vm_id_t receiver_vm_id,
+ ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id,
struct vcpu_locked receiver_locked,
uint32_t func, // NOLINTNEXTLINE
enum vcpu_state *next_state)
@@ -455,7 +453,7 @@
void plat_ffa_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
- struct vcpu_locked receiver_vcpu_locked, ffa_vm_id_t sender_vm_id)
+ struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
/* Calls chains not supported in the Hypervisor/VMs. */
(void)current_locked;
@@ -471,7 +469,7 @@
(void)next_locked;
}
-bool plat_ffa_is_spmd_lp_id(ffa_vm_id_t vm_id)
+bool plat_ffa_is_spmd_lp_id(ffa_id_t vm_id)
{
(void)vm_id;
return false;
@@ -559,8 +557,8 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_msg_send(ffa_vm_id_t sender_vm_id,
- ffa_vm_id_t receiver_vm_id, uint32_t size,
+struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
struct vcpu *current, struct vcpu **next)
{
(void)sender_vm_id;
@@ -663,8 +661,7 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-int64_t plat_ffa_mailbox_waiter_get(ffa_vm_id_t vm_id,
- const struct vcpu *current)
+int64_t plat_ffa_mailbox_waiter_get(ffa_id_t vm_id, const struct vcpu *current)
{
(void)vm_id;
(void)current;
diff --git a/src/arch/fake/mm.c b/src/arch/fake/mm.c
index b67266e..2465bd1 100644
--- a/src/arch/fake/mm.c
+++ b/src/arch/fake/mm.c
@@ -182,7 +182,7 @@
return true;
}
-uint32_t arch_mm_extra_attributes_from_vm(ffa_vm_id_t id)
+uint32_t arch_mm_extra_attributes_from_vm(ffa_id_t id)
{
(void)id;
diff --git a/src/dlog.c b/src/dlog.c
index ca46bb6..784c1f6 100644
--- a/src/dlog.c
+++ b/src/dlog.c
@@ -221,7 +221,7 @@
* Send the contents of the given VM's log buffer to the log, preceded by the VM
* ID and followed by a newline.
*/
-void dlog_flush_vm_buffer(ffa_vm_id_t id, char buffer[], size_t length)
+void dlog_flush_vm_buffer(ffa_id_t id, char buffer[], size_t length)
{
lock();
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index ebc5db7..2f5776d 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -1017,7 +1017,7 @@
* Success is indicated by FFA_SUCCESS.
*/
struct ffa_value ffa_retrieve_check_update(
- struct vm_locked to_locked, ffa_vm_id_t from_id,
+ struct vm_locked to_locked, ffa_id_t from_id,
struct ffa_memory_region_constituent **fragments,
uint32_t *fragment_constituent_counts, uint32_t fragment_count,
uint32_t memory_to_attributes, uint32_t share_func, bool clear,
@@ -1109,7 +1109,7 @@
}
static struct ffa_value ffa_relinquish_check_update(
- struct vm_locked from_locked, ffa_vm_id_t owner_id,
+ struct vm_locked from_locked, ffa_id_t owner_id,
struct ffa_memory_region_constituent **fragments,
uint32_t *fragment_constituent_counts, uint32_t fragment_count,
struct mpool *page_pool, bool clear)
@@ -1437,9 +1437,8 @@
ffa_memory_access_permissions_t permissions =
memory_region->receivers[i]
.receiver_permissions.permissions;
- ffa_vm_id_t receiver_id =
- memory_region->receivers[i]
- .receiver_permissions.receiver;
+ ffa_id_t receiver_id = memory_region->receivers[i]
+ .receiver_permissions.receiver;
if (memory_region->sender == receiver_id) {
dlog_verbose("Can't share memory with itself.\n");
@@ -1578,7 +1577,7 @@
*/
struct ffa_value ffa_memory_send_continue_validate(
struct share_states_locked share_states, ffa_memory_handle_t handle,
- struct ffa_memory_share_state **share_state_ret, ffa_vm_id_t from_vm_id,
+ struct ffa_memory_share_state **share_state_ret, ffa_id_t from_vm_id,
struct mpool *page_pool)
{
struct ffa_memory_share_state *share_state;
@@ -1638,8 +1637,8 @@
struct ffa_memory_region *memory_region)
{
for (uint32_t i = 0; i < memory_region->receiver_count; i++) {
- ffa_vm_id_t receiver = memory_region->receivers[i]
- .receiver_permissions.receiver;
+ ffa_id_t receiver = memory_region->receivers[i]
+ .receiver_permissions.receiver;
if (!vm_id_is_current_world(receiver)) {
return true;
}
@@ -1735,7 +1734,7 @@
* that at this point it has been validated:
* - MBZ at virtual FF-A instance.
*/
- ffa_vm_id_t sender_to_ret =
+ ffa_id_t sender_to_ret =
(from_locked.vm->id == HF_OTHER_WORLD_ID)
? memory_region->sender
: 0;
@@ -1861,9 +1860,9 @@
*/
static bool ffa_retrieved_memory_region_init(
void *response, uint32_t ffa_version, size_t response_max_size,
- ffa_vm_id_t sender, ffa_memory_attributes_t attributes,
+ ffa_id_t sender, ffa_memory_attributes_t attributes,
ffa_memory_region_flags_t flags, ffa_memory_handle_t handle,
- ffa_vm_id_t receiver_id, ffa_memory_access_permissions_t permissions,
+ ffa_id_t receiver_id, ffa_memory_access_permissions_t permissions,
uint32_t page_count, uint32_t total_constituent_count,
const struct ffa_memory_region_constituent constituents[],
uint32_t fragment_constituent_count, uint32_t *total_length,
@@ -1970,7 +1969,7 @@
* in the array, return the region's 'receiver_count'.
*/
uint32_t ffa_memory_region_get_receiver(struct ffa_memory_region *memory_region,
- ffa_vm_id_t receiver)
+ ffa_id_t receiver)
{
struct ffa_memory_access *receivers;
uint32_t i;
@@ -2081,7 +2080,7 @@
*/
static struct ffa_value ffa_memory_retrieve_validate_memory_access_list(
struct ffa_memory_region *memory_region,
- struct ffa_memory_region *retrieve_request, ffa_vm_id_t to_vm_id,
+ struct ffa_memory_region *retrieve_request, ffa_id_t to_vm_id,
ffa_memory_access_permissions_t *permissions)
{
uint32_t retrieve_receiver_index;
@@ -2120,7 +2119,7 @@
&retrieve_request->receivers[i];
ffa_memory_access_permissions_t requested_permissions =
current_receiver->receiver_permissions.permissions;
- ffa_vm_id_t current_receiver_id =
+ ffa_id_t current_receiver_id =
current_receiver->receiver_permissions.receiver;
bool found_to_id = current_receiver_id == to_vm_id;
@@ -2245,7 +2244,7 @@
* memory sharing call.
*/
static struct ffa_value ffa_memory_retrieve_validate(
- ffa_vm_id_t receiver_id, struct ffa_memory_region *retrieve_request,
+ ffa_id_t receiver_id, struct ffa_memory_region *retrieve_request,
struct ffa_memory_region *memory_region, uint32_t *receiver_index,
uint32_t share_func)
{
@@ -2389,7 +2388,7 @@
struct ffa_composite_memory_region *composite;
uint32_t total_length;
uint32_t fragment_length;
- ffa_vm_id_t receiver_id = to_locked.vm->id;
+ ffa_id_t receiver_id = to_locked.vm->id;
bool is_send_complete = false;
ffa_memory_attributes_t attributes;
@@ -2634,7 +2633,7 @@
struct ffa_value ffa_memory_retrieve_continue(struct vm_locked to_locked,
ffa_memory_handle_t handle,
uint32_t fragment_offset,
- ffa_vm_id_t sender_vm_id,
+ ffa_id_t sender_vm_id,
struct mpool *page_pool)
{
struct ffa_memory_region *memory_region;
diff --git a/src/load.c b/src/load.c
index e990624..7c22d91 100644
--- a/src/load.c
+++ b/src/load.c
@@ -964,7 +964,7 @@
for (i = 0; i < manifest->vm_count; ++i) {
const struct manifest_vm *manifest_vm = &manifest->vm[i];
- ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
+ ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
uint64_t mem_size;
paddr_t secondary_mem_begin;
paddr_t secondary_mem_end;
diff --git a/src/manifest.c b/src/manifest.c
index cd90005..075c743 100644
--- a/src/manifest.c
+++ b/src/manifest.c
@@ -147,7 +147,7 @@
allocated_mem_regions_index = 0;
}
-static inline size_t count_digits(ffa_vm_id_t vm_id)
+static inline size_t count_digits(ffa_id_t vm_id)
{
size_t digits = 0;
@@ -162,7 +162,7 @@
* Generates a string with the two letters "vm" followed by an integer.
* Assumes `buf` is of size VM_NAME_BUF_SIZE.
*/
-static void generate_vm_node_name(struct string *str, ffa_vm_id_t vm_id)
+static void generate_vm_node_name(struct string *str, ffa_id_t vm_id)
{
static const char *digits = "0123456789";
size_t vm_id_digits = count_digits(vm_id);
@@ -408,7 +408,7 @@
static enum manifest_return_code parse_vm_common(const struct fdt_node *node,
struct manifest_vm *vm,
- ffa_vm_id_t vm_id)
+ ffa_id_t vm_id)
{
struct uint32list_iter smcs;
size_t idx;
@@ -445,7 +445,7 @@
static enum manifest_return_code parse_vm(struct fdt_node *node,
struct manifest_vm *vm,
- ffa_vm_id_t vm_id)
+ ffa_id_t vm_id)
{
TRY(read_optional_string(node, "kernel_filename",
&vm->kernel_filename));
@@ -1220,7 +1220,7 @@
static enum manifest_return_code parse_ffa_partition_package(
struct mm_stage1_locked stage1_locked, struct fdt_node *node,
- struct manifest_vm *vm, ffa_vm_id_t vm_id,
+ struct manifest_vm *vm, ffa_id_t vm_id,
const struct boot_params *boot_params, struct mpool *ppool)
{
enum manifest_return_code ret = MANIFEST_ERROR_NOT_COMPATIBLE;
@@ -1344,7 +1344,7 @@
/* Iterate over reserved VM IDs and check no such nodes exist. */
for (i = HF_VM_ID_BASE; i < HF_VM_ID_OFFSET; i++) {
- ffa_vm_id_t vm_id = (ffa_vm_id_t)i - HF_VM_ID_BASE;
+ ffa_id_t vm_id = (ffa_id_t)i - HF_VM_ID_BASE;
struct fdt_node vm_node = hyp_node;
generate_vm_node_name(&vm_name, vm_id);
@@ -1355,7 +1355,7 @@
/* Iterate over VM nodes until we find one that does not exist. */
for (i = 0; i <= MAX_VMS; ++i) {
- ffa_vm_id_t vm_id = HF_VM_ID_OFFSET + i;
+ ffa_id_t vm_id = HF_VM_ID_OFFSET + i;
struct fdt_node vm_node = hyp_node;
generate_vm_node_name(&vm_name, vm_id - HF_VM_ID_BASE);
diff --git a/src/vm.c b/src/vm.c
index 44bd112..838b134 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -45,7 +45,7 @@
return arch_vm_init_mm(vm, ppool);
}
-struct vm *vm_init(ffa_vm_id_t id, ffa_vcpu_count_t vcpu_count,
+struct vm *vm_init(ffa_id_t id, ffa_vcpu_count_t vcpu_count,
struct mpool *ppool, bool el0_partition)
{
uint32_t i;
@@ -128,7 +128,7 @@
/**
* Returns a pointer to the VM with the corresponding id.
*/
-struct vm *vm_find(ffa_vm_id_t id)
+struct vm *vm_find(ffa_id_t id)
{
uint16_t index;
@@ -152,7 +152,7 @@
/**
* Returns a locked instance of the VM with the corresponding id.
*/
-struct vm_locked vm_find_locked(ffa_vm_id_t id)
+struct vm_locked vm_find_locked(ffa_id_t id)
{
struct vm *vm = vm_find(id);
@@ -228,7 +228,7 @@
/**
* Gets `vm`'s wait entry for waiting on the `for_vm`.
*/
-struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_vm_id_t for_vm)
+struct wait_entry *vm_get_wait_entry(struct vm *vm, ffa_id_t for_vm)
{
uint16_t index;
@@ -259,7 +259,7 @@
/**
* Gets the ID of the VM which the given VM's wait entry is for.
*/
-ffa_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
+ffa_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
{
uint16_t index = entry - vm->wait_entries;
@@ -271,7 +271,7 @@
* i.e. the hypervisor or a normal world VM when running in the normal world, or
* the SPM or an SP when running in the secure world.
*/
-bool vm_id_is_current_world(ffa_vm_id_t vm_id)
+bool vm_id_is_current_world(ffa_id_t vm_id)
{
return (vm_id & HF_VM_ID_WORLD_MASK) !=
(HF_OTHER_WORLD_ID & HF_VM_ID_WORLD_MASK);
@@ -630,7 +630,7 @@
* are per VCPU or global, as specified.
*/
bool vm_notifications_validate_binding(struct vm_locked vm_locked,
- bool is_from_vm, ffa_vm_id_t sender_id,
+ bool is_from_vm, ffa_id_t sender_id,
ffa_notifications_bitmap_t notifications,
bool is_per_vcpu)
{
@@ -645,7 +645,7 @@
* notifications.
*/
void vm_notifications_update_bindings(struct vm_locked vm_locked,
- bool is_from_vm, ffa_vm_id_t sender_id,
+ bool is_from_vm, ffa_id_t sender_id,
ffa_notifications_bitmap_t notifications,
bool is_per_vcpu)
{
@@ -671,7 +671,7 @@
}
bool vm_notifications_validate_bound_sender(
- struct vm_locked vm_locked, bool is_from_vm, ffa_vm_id_t sender_id,
+ struct vm_locked vm_locked, bool is_from_vm, ffa_id_t sender_id,
ffa_notifications_bitmap_t notifications)
{
CHECK(vm_locked.vm != NULL);
@@ -807,7 +807,7 @@
}
static void vm_notifications_state_info_get(
- struct notifications_state *state, ffa_vm_id_t vm_id, bool is_per_vcpu,
+ struct notifications_state *state, ffa_id_t vm_id, bool is_per_vcpu,
ffa_vcpu_index_t vcpu_id, uint16_t *ids, uint32_t *ids_count,
uint32_t *lists_sizes, uint32_t *lists_count,
const uint32_t ids_max_count,