refactor(ffa): remove `plat` prefix
Rename files and functions to remove the `plat` prefix and replace it
with a module-specific prefix (e.g. `ffa_cpu_cycles_`, `ffa_notifications_`).
Change-Id: Ie64cefcdf91da7b20e520828d8e234af12ab5c85
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/ffa/hypervisor/cpu_cycles.c b/src/ffa/hypervisor/cpu_cycles.c
index 667e0e9..af54f19 100644
--- a/src/ffa/hypervisor/cpu_cycles.c
+++ b/src/ffa/hypervisor/cpu_cycles.c
@@ -12,8 +12,8 @@
#include "hf/ffa/indirect_messaging.h"
#include "hf/vcpu.h"
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *ret)
{
/*
* VM's requests should be forwarded to the SPMC, if target is an SP.
@@ -30,9 +30,9 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
(void)next;
(void)vcpu_idx;
@@ -56,18 +56,16 @@
* to be compliant with version v1.0 of the FF-A specification. It serves as
* a blocking call.
*/
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
- return plat_ffa_msg_recv(true, current_locked, next);
+ return ffa_indirect_msg_recv(true, current_locked, next);
}
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_id_t vm_id,
- ffa_id_t receiver_vm_id,
- struct vcpu_locked receiver_locked,
- uint32_t func,
- enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+ struct vcpu_locked current_locked, ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+ uint32_t func, enum vcpu_state *next_state)
{
(void)current_locked;
(void)vm_id;
@@ -95,8 +93,8 @@
}
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
- struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+ struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
/* Scheduling mode not supported in the Hypervisor/VMs. */
(void)current_locked;
@@ -107,10 +105,10 @@
* Prepare to yield execution back to the VM that allocated cpu cycles and move
* to BLOCKED state.
*/
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
- struct vcpu **next,
- uint32_t timeout_low,
- uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+ struct vcpu **next,
+ uint32_t timeout_low,
+ uint32_t timeout_high)
{
struct vcpu *current = current_locked.vcpu;
struct ffa_value ret = {
diff --git a/src/ffa/hypervisor/direct_messaging.c b/src/ffa/hypervisor/direct_messaging.c
index 2b78f4b..c5b5292 100644
--- a/src/ffa/hypervisor/direct_messaging.c
+++ b/src/ffa/hypervisor/direct_messaging.c
@@ -18,9 +18,9 @@
/**
* Check validity of a FF-A direct message request.
*/
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -33,8 +33,9 @@
sender_vm_id == current_vm_id && vm_is_primary(current->vm);
}
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
- struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+ struct vm *receiver_vm,
+ uint32_t func)
{
(void)sender_vm;
(void)receiver_vm;
@@ -50,9 +51,9 @@
/**
* Check validity of a FF-A direct message response.
*/
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -65,9 +66,9 @@
receiver_vm_id == HF_PRIMARY_VM_ID;
}
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
- struct ffa_value args,
- struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+ struct ffa_value args,
+ struct ffa_value *ret)
{
if (!plat_ffa_is_tee_enabled()) {
dlog_verbose("Not forwarding: ffa_tee_enabled is false\n");
@@ -101,7 +102,7 @@
return true;
}
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
@@ -111,7 +112,7 @@
(void)sender_vm_id;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
/* Calls chains not supported in the Hypervisor/VMs. */
diff --git a/src/ffa/hypervisor/ffa_memory.c b/src/ffa/hypervisor/ffa_memory.c
index dfc5985..84cbcd4 100644
--- a/src/ffa/hypervisor/ffa_memory.c
+++ b/src/ffa/hypervisor/ffa_memory.c
@@ -18,7 +18,7 @@
#include "hypervisor.h"
#include "sysregs.h"
-enum ffa_memory_handle_allocator plat_ffa_memory_handle_allocator(void)
+enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
{
return FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
}
@@ -37,8 +37,8 @@
/**
* Check validity of the FF-A memory send function attempt.
*/
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
- uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+ uint32_t share_func, bool multiple_borrower)
{
/*
* Currently memory interfaces are not forwarded from hypervisor to
@@ -54,18 +54,18 @@
return true;
}
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
{
return 0U;
}
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
(void)current;
return has_vhe_support();
}
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
(void)current;
return has_vhe_support();
@@ -263,7 +263,7 @@
return ret;
}
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
struct vm *from, uint32_t share_func,
struct ffa_memory_region **memory_region, uint32_t length,
uint32_t fragment_length, struct mpool *page_pool)
@@ -417,7 +417,7 @@
return ret;
}
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
struct vm *to, ffa_memory_handle_t handle,
ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
@@ -655,7 +655,7 @@
return ret;
}
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
struct vm *from, void *fragment, uint32_t fragment_length,
ffa_memory_handle_t handle, struct mpool *page_pool)
{
@@ -684,7 +684,7 @@
return ret;
}
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
ffa_memory_attributes_t attributes, uint32_t mode)
{
(void)mode;
diff --git a/src/ffa/hypervisor/indirect_messaging.c b/src/ffa/hypervisor/indirect_messaging.c
index 0fd2b6c..fb11f9f 100644
--- a/src/ffa/hypervisor/indirect_messaging.c
+++ b/src/ffa/hypervisor/indirect_messaging.c
@@ -14,8 +14,8 @@
#include "hf/ffa_internal.h"
#include "hf/vm.h"
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
- struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+ struct vm_locked receiver_locked)
{
(void)sender_locked;
(void)receiver_locked;
@@ -27,8 +27,9 @@
return true;
}
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
- struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+ ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
/* FFA_MSG_SEND2 is forwarded to SPMC when the receiver is an SP. */
if (vm_id_is_current_world(receiver_vm_id)) {
@@ -59,7 +60,7 @@
* Checks whether the vCPU's attempt to wait for a message has already been
* interrupted or whether it is allowed to block.
*/
-static bool plat_ffa_msg_recv_block_interrupted(
+static bool ffa_indirect_msg_recv_block_interrupted(
struct vcpu_locked current_locked)
{
bool interrupted;
@@ -98,9 +99,9 @@
*
* No new messages can be received until the mailbox has been cleared.
*/
-struct ffa_value plat_ffa_msg_recv(bool block,
- struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_indirect_msg_recv(bool block,
+ struct vcpu_locked current_locked,
+ struct vcpu **next)
{
struct vm *vm = current_locked.vcpu->vm;
struct vcpu *current = current_locked.vcpu;
@@ -143,7 +144,7 @@
* that time to FFA_SUCCESS.
*/
return_code = ffa_error(FFA_INTERRUPTED);
- if (plat_ffa_msg_recv_block_interrupted(current_locked)) {
+ if (ffa_indirect_msg_recv_block_interrupted(current_locked)) {
goto out;
}
diff --git a/src/ffa/hypervisor/interrupts.c b/src/ffa/hypervisor/interrupts.c
index 512cc57..21d89ed 100644
--- a/src/ffa/hypervisor/interrupts.c
+++ b/src/ffa/hypervisor/interrupts.c
@@ -10,7 +10,8 @@
#include "hf/check.h"
#include "hf/vm.h"
-void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next)
+void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+ struct vcpu **next)
{
(void)current;
(void)next;
@@ -22,7 +23,7 @@
CHECK(false);
}
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
@@ -36,8 +37,8 @@
/**
* Enable relevant virtual interrupts for VMs.
*/
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
- struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+ struct vm_locked vm_locked)
{
struct vcpu *current;
struct interrupts *interrupts;
@@ -51,7 +52,7 @@
}
}
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
return api_interrupt_get(current_locked);
}
diff --git a/src/ffa/hypervisor/notifications.c b/src/ffa/hypervisor/notifications.c
index 69ea010..f03f378 100644
--- a/src/ffa/hypervisor/notifications.c
+++ b/src/ffa/hypervisor/notifications.c
@@ -20,8 +20,8 @@
* Check validity of the calls:
* FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
*/
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
- struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+ ffa_id_t vm_id)
{
/*
* Call should only be used by the Hypervisor, so any attempt of
@@ -33,16 +33,15 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
ffa_id_t current_vm_id = current->vm->id;
/** If Hafnium is hypervisor, receiver needs to be current vm. */
return sender_id != receiver_id && current_vm_id == receiver_id;
}
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
ffa_id_t receiver_id, ffa_id_t sender_id,
ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
bool is_bind, struct ffa_value *ret)
@@ -66,9 +65,8 @@
return false;
}
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -76,10 +74,10 @@
return sender_id == current_vm_id && sender_id != receiver_id;
}
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
{
/* Forward only if receiver is an SP. */
if (vm_id_is_current_world(receiver_vm_id)) {
@@ -103,9 +101,8 @@
return true;
}
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id,
- ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+ ffa_notification_flags_t flags)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -115,8 +112,8 @@
return (current_vm_id == receiver_id);
}
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -124,15 +121,15 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
- ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
struct ffa_value ret;
@@ -155,10 +152,10 @@
return true;
}
-void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
- uint32_t *lists_sizes,
- uint32_t *lists_count,
- const uint32_t ids_count_max)
+void ffa_notifications_info_get_forward(uint16_t *ids, uint32_t *ids_count,
+ uint32_t *lists_sizes,
+ uint32_t *lists_count,
+ const uint32_t ids_count_max)
{
CHECK(ids != NULL);
CHECK(ids_count != NULL);
@@ -228,7 +225,7 @@
sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
}
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
ffa_notifications_bitmap_t *from_sp)
{
@@ -252,7 +249,7 @@
return ret;
}
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
@@ -292,12 +289,12 @@
* intrastructure that encompasses the NWd, and we are not interested in testing
* the flow of notifications between VMs only.
*/
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
{
(void)cpu;
}
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
{
(void)cpu;
}
@@ -306,7 +303,7 @@
* Track that in current CPU there was a notification set with delay SRI
* flag.
*/
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
{
(void)cpu;
}
diff --git a/src/ffa/hypervisor/power_management.c b/src/ffa/hypervisor/power_management.c
deleted file mode 100644
index e1e81b2..0000000
--- a/src/ffa/hypervisor/power_management.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2024 The Hafnium Authors.
- *
- * Use of this source code is governed by a BSD-style
- * license that can be found in the LICENSE file or at
- * https://opensource.org/licenses/BSD-3-Clause.
- */
-
-#include "hf/ffa/power_management.h"
-
-/**
- * Returns FFA_ERROR as FFA_SECONDARY_EP_REGISTER is not supported at the
- * non-secure FF-A instances.
- */
-bool plat_ffa_is_secondary_ep_register_supported(void)
-{
- return false;
-}
diff --git a/src/ffa/hypervisor/setup_and_discovery.c b/src/ffa/hypervisor/setup_and_discovery.c
index da9ebf7..74afa60 100644
--- a/src/ffa/hypervisor/setup_and_discovery.c
+++ b/src/ffa/hypervisor/setup_and_discovery.c
@@ -10,7 +10,7 @@
#include "hf/arch/other_world.h"
-#include "hf/ffa.h"
+#include "hf/check.h"
#include "hf/ffa/vm.h"
#include "hf/manifest.h"
#include "hf/vm.h"
@@ -18,7 +18,7 @@
#include "hypervisor.h"
#include "smc.h"
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
{
if (plat_ffa_is_tee_enabled()) {
/*
@@ -35,7 +35,16 @@
.arg2 = FFA_NOT_SUPPORTED};
}
-void plat_ffa_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
+/**
+ * Returns FFA_ERROR as FFA_SECONDARY_EP_REGISTER is not supported at the
+ * non-secure FF-A instances.
+ */
+bool ffa_setup_is_secondary_ep_register_supported(void)
+{
+ return false;
+}
+
+void ffa_setup_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
{
struct ffa_value ret;
@@ -46,7 +55,7 @@
CHECK(ret.func == FFA_SUCCESS_32);
}
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
struct vm *vm = vm_locked.vm;
struct vm *other_world;
@@ -56,7 +65,7 @@
return;
}
- if (!plat_ffa_vm_supports_indirect_messages(vm)) {
+ if (!ffa_vm_supports_indirect_messages(vm)) {
return;
}
@@ -71,14 +80,14 @@
vm->id, (uintptr_t)vm->mailbox.recv,
(uintptr_t)vm->mailbox.send);
- plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
+ ffa_setup_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
dlog_verbose("Mailbox of %x owned by SPMC.\n", vm_locked.vm->id);
}
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
struct ffa_value ret;
uint64_t func;
@@ -92,7 +101,7 @@
return;
}
- if (!plat_ffa_vm_supports_indirect_messages(vm_locked.vm)) {
+ if (!ffa_vm_supports_indirect_messages(vm_locked.vm)) {
return;
}
@@ -112,7 +121,7 @@
}
}
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
/*
* Allow forwarding from the Hypervisor if TEE or SPMC exists and
@@ -125,7 +134,7 @@
* Forward helper for FFA_PARTITION_INFO_GET.
* Emits FFA_PARTITION_INFO_GET from Hypervisor to SPMC if allowed.
*/
-ffa_vm_count_t plat_ffa_partition_info_get_forward(
+ffa_vm_count_t ffa_setup_partition_info_get_forward(
const struct ffa_uuid *uuid, uint32_t flags,
struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
{
@@ -185,12 +194,12 @@
return vm_count;
}
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
- paddr_t fdt_addr,
- size_t fdt_allocated_size,
- const struct manifest_vm *manifest_vm,
- const struct boot_params *boot_params,
- struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+ paddr_t fdt_addr,
+ size_t fdt_allocated_size,
+ const struct manifest_vm *manifest_vm,
+ const struct boot_params *boot_params,
+ struct mpool *ppool)
{
struct fdt partition_fdt;
@@ -214,7 +223,7 @@
pa_add(fdt_addr, fdt_allocated_size), ppool) == true);
}
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
ffa_id_t caller_id, const struct vm *target)
{
ffa_partition_properties_t result = target->messaging_method;
@@ -234,14 +243,14 @@
return result;
}
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
- struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+ struct ffa_value *ret)
{
struct vm *vm = vm_locked.vm;
ffa_id_t vm_id = vm->id;
if (!plat_ffa_is_tee_enabled() ||
- !plat_ffa_vm_supports_indirect_messages(vm)) {
+ !ffa_vm_supports_indirect_messages(vm)) {
return false;
}
@@ -274,8 +283,8 @@
*
* Returns true if the ownership belongs to the hypervisor.
*/
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
- struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+ struct ffa_value *ret)
{
struct ffa_value other_world_ret;
@@ -286,7 +295,7 @@
* - If the mailbox ownership hasn't been transferred to the SPMC.
*/
if (!plat_ffa_is_tee_enabled() ||
- !plat_ffa_vm_supports_indirect_messages(to_locked.vm) ||
+ !ffa_vm_supports_indirect_messages(to_locked.vm) ||
to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
return true;
}
diff --git a/src/ffa/hypervisor/vm.c b/src/ffa/hypervisor/vm.c
index 943ae66..08b1cff 100644
--- a/src/ffa/hypervisor/vm.c
+++ b/src/ffa/hypervisor/vm.c
@@ -8,20 +8,20 @@
#include "hf/vm.h"
-bool plat_ffa_vm_supports_indirect_messages(struct vm *vm)
+bool ffa_vm_supports_indirect_messages(struct vm *vm)
{
return vm->ffa_version >= FFA_VERSION_1_1 &&
vm_supports_messaging_method(vm, FFA_PARTITION_INDIRECT_MSG);
}
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
{
(void)vm;
return false;
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
{
if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
return vm_find_locked(vm_id);
@@ -30,12 +30,12 @@
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
{
- return plat_ffa_vm_find_locked(vm_id);
+ return ffa_vm_find_locked(vm_id);
}
-bool plat_ffa_vm_notifications_info_get( // NOLINTNEXTLINE
+bool ffa_vm_notifications_info_get( // NOLINTNEXTLINE
uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
uint32_t *lists_sizes, // NOLINTNEXTLINE
uint32_t *lists_count, const uint32_t ids_count_max)
@@ -49,13 +49,13 @@
return false;
}
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
/* Hypervisor never frees VM structs. */
(void)to_destroy_locked;
}
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
{
(void)vm_locked;
}