refactor(ffa): remove `plat` prefix
Rename functions to remove the `plat_ffa` prefix and replace it with a
module-specific prefix (e.g. `ffa_vm_`, `ffa_setup_`, `ffa_notifications_`,
`ffa_cpu_cycles_`, `ffa_direct_msg_`, `ffa_indirect_msg_`, `ffa_interrupts_`,
`ffa_memory_`).
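A few representative renames from this change (see the diff for the full
list):

    plat_ffa_vm_find_locked          -> ffa_vm_find_locked
    plat_ffa_acquire_receiver_rx     -> ffa_setup_acquire_receiver_rx
    plat_ffa_run_checks              -> ffa_cpu_cycles_run_checks
    plat_ffa_direct_request_forward  -> ffa_direct_msg_direct_request_forward
    plat_ffa_msg_send2_forward       -> ffa_indirect_msg_send2_forward
    plat_ffa_sri_set_delayed         -> ffa_notifications_sri_set_delayed
    plat_ffa_handle_secure_interrupt -> ffa_interrupts_handle_secure_interrupt
    plat_ffa_other_world_mem_reclaim -> ffa_memory_other_world_mem_reclaim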
Change-Id: Ie64cefcdf91da7b20e520828d8e234af12ab5c85
Signed-off-by: Karl Meakin <karl.meakin@arm.com>
diff --git a/src/api.c b/src/api.c
index 81e7e24..5874f61 100644
--- a/src/api.c
+++ b/src/api.c
@@ -25,7 +25,6 @@
#include "hf/ffa/indirect_messaging.h"
#include "hf/ffa/interrupts.h"
#include "hf/ffa/notifications.h"
-#include "hf/ffa/power_management.h"
#include "hf/ffa/setup_and_discovery.h"
#include "hf/ffa/vm.h"
#include "hf/ffa_internal.h"
@@ -195,7 +194,7 @@
*/
static bool api_ffa_is_managed_exit_ongoing(struct vcpu_locked vcpu_locked)
{
- return (plat_ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
+ return (ffa_vm_managed_exit_supported(vcpu_locked.vcpu->vm) &&
vcpu_locked.vcpu->processing_managed_exit);
}
@@ -295,7 +294,7 @@
}
current_locked = vcpu_lock(current);
- transition_allowed = plat_ffa_check_runtime_state_transition(
+ transition_allowed = ffa_cpu_cycles_check_runtime_state_transition(
current_locked, current->vm->id, HF_INVALID_VM_ID, next_locked,
FFA_YIELD_32, &next_state);
@@ -310,13 +309,13 @@
* to be resumed immediately without ever moving to BLOCKED state. One
* such scenario occurs when an SP's execution context attempts to
* yield cycles while handling secure interrupt. Refer to the comments
- * in the SPMC variant of the plat_ffa_yield_prepare function.
+ * in the SPMC variant of the ffa_cpu_cycles_yield_prepare function.
*/
assert(!vm_id_is_current_world(current->vm->id) ||
next_state == VCPU_STATE_BLOCKED);
- ret = plat_ffa_yield_prepare(current_locked, next, timeout_low,
- timeout_high);
+ ret = ffa_cpu_cycles_yield_prepare(current_locked, next, timeout_low,
+ timeout_high);
out:
vcpu_unlock(&current_locked);
return ret;
@@ -374,7 +373,7 @@
memory_order_relaxed);
vm_locked = vm_lock(current->vm);
- plat_ffa_free_vm_resources(vm_locked);
+ ffa_vm_free_resources(vm_locked);
vm_unlock(&vm_locked);
current_locked = vcpu_lock(current);
@@ -399,7 +398,7 @@
struct ffa_value ret;
/* Acquire receiver's RX buffer. */
- if (!plat_ffa_acquire_receiver_rx(vm_locked, &ret)) {
+ if (!ffa_setup_acquire_receiver_rx(vm_locked, &ret)) {
dlog_verbose("Failed to acquire RX buffer for VM %x\n", vm->id);
return ret;
}
@@ -487,7 +486,7 @@
{
ffa_partition_properties_t properties;
- properties = plat_ffa_partition_properties(caller_id, vm);
+ properties = ffa_setup_partition_properties(caller_id, vm);
properties |= FFA_PARTITION_AARCH64_EXEC;
if (vm->ffa_version >= FFA_VERSION_1_1) {
@@ -612,7 +611,7 @@
uint16_t curr_index = 0;
uint16_t start_index = 0;
- if (!plat_ffa_partition_info_get_regs_forward_allowed()) {
+ if (!ffa_setup_partition_info_get_regs_forward_allowed()) {
return true;
}
@@ -894,8 +893,8 @@
* - If UUID is non-Null and vm_count is zero it means there is no such
* partition identified in the system.
*/
- vm_count = plat_ffa_partition_info_get_forward(uuid, flags, partitions,
- vm_count);
+ vm_count = ffa_setup_partition_info_get_forward(uuid, flags, partitions,
+ vm_count);
/*
* Unrecognized UUID: does not match any of the VMs (or SPs)
@@ -1116,7 +1115,7 @@
{
struct vm_locked vm_locked;
- vm_locked = plat_ffa_vm_find_locked(current->vm->id);
+ vm_locked = ffa_vm_find_locked(current->vm->id);
if (vm_locked.vm == NULL) {
return;
}
@@ -1161,7 +1160,7 @@
}
current_locked = vcpu_lock(current);
- if (!plat_ffa_check_runtime_state_transition(
+ if (!ffa_cpu_cycles_check_runtime_state_transition(
current_locked, current->vm->id, HF_INVALID_VM_ID,
next_locked, FFA_MSG_WAIT_32, &next_state)) {
ret = ffa_error(FFA_DENIED);
@@ -1171,7 +1170,7 @@
assert(!vm_id_is_current_world(current->vm->id) ||
next_state == VCPU_STATE_WAITING);
- ret = plat_ffa_msg_wait_prepare(current_locked, next);
+ ret = ffa_cpu_cycles_msg_wait_prepare(current_locked, next);
/*
* To maintain partial ordering of locks, release vCPU lock before
@@ -1312,7 +1311,7 @@
assert(need_vm_lock == true);
if (!vm_locked.vm->el0_partition) {
- plat_ffa_inject_notification_pending_interrupt(
+ ffa_interrupts_inject_notification_pending_interrupt(
vcpu_next_locked, current_locked, vm_locked);
}
@@ -1322,7 +1321,7 @@
break;
case VCPU_STATE_BLOCKED_INTERRUPT:
if (need_vm_lock &&
- plat_ffa_inject_notification_pending_interrupt(
+ ffa_interrupts_inject_notification_pending_interrupt(
vcpu_next_locked, current_locked, vm_locked)) {
assert(vcpu_interrupt_count_get(vcpu_next_locked) > 0);
break;
@@ -1362,7 +1361,7 @@
case VCPU_STATE_PREEMPTED:
/* Check NPI is to be injected here. */
if (need_vm_lock) {
- plat_ffa_inject_notification_pending_interrupt(
+ ffa_interrupts_inject_notification_pending_interrupt(
vcpu_next_locked, current_locked, vm_locked);
}
break;
@@ -1376,7 +1375,8 @@
goto out;
}
- plat_ffa_init_schedule_mode_ffa_run(current_locked, vcpu_next_locked);
+ ffa_cpu_cycles_init_schedule_mode_ffa_run(current_locked,
+ vcpu_next_locked);
timer_migrate_to_other_cpu(current_locked.vcpu->cpu, vcpu_next_locked);
vcpu->cpu = current_locked.vcpu->cpu;
@@ -1409,11 +1409,12 @@
struct two_vcpu_locked vcpus_locked;
current_locked = vcpu_lock(current);
- if (!plat_ffa_run_checks(current_locked, vm_id, vcpu_idx, &ret, next)) {
+ if (!ffa_cpu_cycles_run_checks(current_locked, vm_id, vcpu_idx, &ret,
+ next)) {
goto out;
}
- if (plat_ffa_run_forward(vm_id, vcpu_idx, &ret)) {
+ if (ffa_cpu_cycles_run_forward(vm_id, vcpu_idx, &ret)) {
goto out;
}
@@ -1450,7 +1451,7 @@
current_locked = vcpus_locked.vcpu1;
vcpu_next_locked = vcpus_locked.vcpu2;
- if (!plat_ffa_check_runtime_state_transition(
+ if (!ffa_cpu_cycles_check_runtime_state_transition(
current_locked, current->vm->id, HF_INVALID_VM_ID,
vcpu_next_locked, FFA_RUN_32, &next_state)) {
ret = ffa_error(FFA_DENIED);
@@ -1806,7 +1807,7 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
- owner_vm_locked = plat_ffa_vm_find_locked_create(owner_vm_id);
+ owner_vm_locked = ffa_vm_find_locked_create(owner_vm_id);
if (owner_vm_locked.vm == NULL) {
dlog_error("Cannot map RX/TX for VM ID %#x, not found.\n",
owner_vm_id);
@@ -1829,7 +1830,7 @@
}
/* Forward buffer mapping to SPMC if coming from a VM. */
- plat_ffa_rxtx_map_forward(owner_vm_locked);
+ ffa_setup_rxtx_map_forward(owner_vm_locked);
ret = (struct ffa_value){.func = FFA_SUCCESS_32};
@@ -1883,7 +1884,7 @@
/* VM ID of which buffers have to be unmapped. */
owner_vm_id = (allocator_id != 0) ? allocator_id : vm->id;
- vm_locked = plat_ffa_vm_find_locked(owner_vm_id);
+ vm_locked = ffa_vm_find_locked(owner_vm_id);
vm = vm_locked.vm;
if (vm == NULL) {
dlog_error("Cannot unmap RX/TX for VM ID %#x, not found.\n",
@@ -1938,10 +1939,10 @@
vm->mailbox.send = NULL;
vm->mailbox.recv = NULL;
- plat_ffa_vm_destroy(vm_locked);
+ ffa_vm_destroy(vm_locked);
/* Forward buffer unmapping to SPMC if coming from a VM. */
- plat_ffa_rxtx_unmap_forward(vm_locked);
+ ffa_setup_rxtx_unmap_forward(vm_locked);
mm_unlock_stage1(&mm_stage1_locked);
@@ -1982,7 +1983,7 @@
* when the message is forwarded.
*/
msg_sender_id = (sender_vm_id != 0) ? sender_vm_id : from->id;
- sender_locked = plat_ffa_vm_find_locked(msg_sender_id);
+ sender_locked = ffa_vm_find_locked(msg_sender_id);
if (sender_locked.vm == NULL) {
dlog_error("Cannot send message from VM ID %#x, not found.\n",
msg_sender_id);
@@ -2047,12 +2048,12 @@
* Check if the message has to be forwarded to the SPMC, in
* this case return, the SPMC will handle the buffer copy.
*/
- if (plat_ffa_msg_send2_forward(receiver_id, sender_id, &ret)) {
+ if (ffa_indirect_msg_send2_forward(receiver_id, sender_id, &ret)) {
goto out_unlock_sender;
}
/* Ensure the receiver VM exists. */
- to_locked = plat_ffa_vm_find_locked(receiver_id);
+ to_locked = ffa_vm_find_locked(receiver_id);
to = to_locked.vm;
if (to == NULL) {
@@ -2067,7 +2068,7 @@
* Sender is the VM/SP who originally sent the message, not the
* hypervisor possibly relaying it.
*/
- if (!plat_ffa_is_indirect_msg_supported(sender_locked, to_locked)) {
+ if (!ffa_indirect_msg_is_supported(sender_locked, to_locked)) {
dlog_verbose("VM %#x doesn't support indirect message\n",
sender_id);
ret = ffa_error(FFA_DENIED);
@@ -2084,7 +2085,7 @@
}
/* Acquire receiver's RX buffer. */
- if (!plat_ffa_acquire_receiver_rx(to_locked, &ret)) {
+ if (!ffa_setup_acquire_receiver_rx(to_locked, &ret)) {
dlog_error("Failed to acquire RX buffer for VM %#x\n", to->id);
goto out;
}
@@ -2133,9 +2134,9 @@
if ((FFA_NOTIFICATIONS_FLAG_DELAY_SRI & flags) == 0) {
dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
vcpu_index(current));
- plat_ffa_sri_trigger_not_delayed(current->cpu);
+ ffa_notifications_sri_trigger_not_delayed(current->cpu);
} else {
- plat_ffa_sri_set_delayed(current->cpu);
+ ffa_notifications_sri_set_delayed(current->cpu);
}
}
@@ -2189,7 +2190,7 @@
release_vm_id = receiver_id;
}
- vm_locked = plat_ffa_vm_find_locked(release_vm_id);
+ vm_locked = ffa_vm_find_locked(release_vm_id);
vm = vm_locked.vm;
if (vm == NULL) {
dlog_error("No buffer registered for VM ID %#x.\n",
@@ -2197,7 +2198,7 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (plat_ffa_rx_release_forward(vm_locked, &ret)) {
+ if (ffa_setup_rx_release_forward(vm_locked, &ret)) {
goto out;
}
@@ -2236,7 +2237,7 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
- receiver_locked = plat_ffa_vm_find_locked(receiver_id);
+ receiver_locked = ffa_vm_find_locked(receiver_id);
receiver = receiver_locked.vm;
if (receiver == NULL || receiver->mailbox.recv == NULL) {
@@ -2853,13 +2854,13 @@
return ret;
}
- if (!plat_ffa_is_direct_request_valid(current, sender_vm_id,
- receiver_vm_id)) {
+ if (!ffa_direct_msg_is_direct_request_valid(current, sender_vm_id,
+ receiver_vm_id)) {
dlog_verbose("Invalid direct message request.\n");
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (plat_ffa_direct_request_forward(receiver_vm_id, args, &ret)) {
+ if (ffa_direct_msg_direct_request_forward(receiver_vm_id, args, &ret)) {
dlog_verbose("Direct message request forwarded\n");
return ret;
}
@@ -2882,8 +2883,8 @@
* Check if sender supports sending direct message req, and if
* receiver supports receipt of direct message requests.
*/
- if (!plat_ffa_is_direct_request_supported(current->vm, receiver_vm,
- args.func)) {
+ if (!ffa_direct_msg_is_direct_request_supported(
+ current->vm, receiver_vm, args.func)) {
dlog_verbose("Direct message request not supported\n");
return ffa_error(FFA_DENIED);
}
@@ -2926,7 +2927,7 @@
goto out;
}
- if (!plat_ffa_check_runtime_state_transition(
+ if (!ffa_cpu_cycles_check_runtime_state_transition(
current_locked, sender_vm_id, HF_INVALID_VM_ID,
receiver_vcpu_locked, args.func, &next_state)) {
ret = ffa_error(FFA_DENIED);
@@ -2984,7 +2985,7 @@
next_state == VCPU_STATE_BLOCKED);
current->state = VCPU_STATE_BLOCKED;
- plat_ffa_wind_call_chain_ffa_direct_req(
+ ffa_direct_msg_wind_call_chain_ffa_direct_req(
current_locked, receiver_vcpu_locked, sender_vm_id);
/* Switch to receiver vCPU targeted to by direct msg request */
@@ -2997,7 +2998,7 @@
* interrupt. Following call assumes that '*next' has been set
* to receiver_vcpu.
*/
- plat_ffa_inject_notification_pending_interrupt(
+ ffa_interrupts_inject_notification_pending_interrupt(
receiver_vcpu_locked, current_locked, receiver_locked);
}
@@ -3038,7 +3039,7 @@
} else if (vm_id_is_current_world(receiver_vm_id)) {
/*
* It is expected the receiver_vm_id to be from an SP, otherwise
- * 'plat_ffa_is_direct_response_valid' should have
+ * 'ffa_direct_msg_is_direct_response_valid' should have
* made function return error before getting to this point.
*/
*next = api_switch_to_vm(current_locked, to_ret,
@@ -3073,8 +3074,8 @@
}
}
- if (!plat_ffa_is_direct_response_valid(current, sender_vm_id,
- receiver_vm_id)) {
+ if (!ffa_direct_msg_is_direct_response_valid(current, sender_vm_id,
+ receiver_vm_id)) {
dlog_verbose("Invalid direct response call.\n");
return false;
}
@@ -3155,7 +3156,7 @@
current_locked = vcpu_lock(current);
- if (!plat_ffa_check_runtime_state_transition(
+ if (!ffa_cpu_cycles_check_runtime_state_transition(
current_locked, sender_vm_id, receiver_vm_id, next_locked,
args.func, &next_state)) {
ret = ffa_error(FFA_DENIED);
@@ -3215,15 +3216,16 @@
/* Inject timer interrupt if timer has expired. */
api_inject_arch_timer_interrupt(current_locked, next_locked);
- plat_ffa_unwind_call_chain_ffa_direct_resp(current_locked, next_locked);
+ ffa_direct_msg_unwind_call_chain_ffa_direct_resp(current_locked,
+ next_locked);
/*
* Check if there is a pending secure interrupt.
* If there is, return back to the caller with FFA_INTERRUPT,
* and set the `next` vcpu in a preempted state.
*/
- if (plat_ffa_intercept_call(current_locked, next_locked,
- &signal_interrupt)) {
+ if (ffa_interrupts_intercept_call(current_locked, next_locked,
+ &signal_interrupt)) {
ret = signal_interrupt;
*next = NULL;
}
@@ -3601,7 +3603,7 @@
goto out;
}
- if (!plat_ffa_is_memory_send_valid(
+ if (!ffa_memory_is_send_valid(
receiver_id, from->id, share_func,
memory_region->receiver_count > 1)) {
ret = ffa_error(FFA_DENIED);
@@ -3616,7 +3618,7 @@
}
if (targets_other_world) {
- ret = plat_ffa_other_world_mem_send(
+ ret = ffa_memory_other_world_mem_send(
from, share_func, &memory_region, length,
fragment_length, &api_page_pool);
} else {
@@ -3661,7 +3663,7 @@
request_v1_0->attributes.security == 0U &&
request_v1_0->flags == 0U && request_v1_0->tag == 0U &&
request_v1_0->receiver_count == 0U &&
- plat_ffa_memory_handle_allocated_by_current_world(
+ ffa_memory_is_handle_allocated_by_current_world(
request_v1_0->handle);
}
default:
@@ -3674,7 +3676,7 @@
request->memory_access_desc_size == 0U &&
request->receiver_count == 0U &&
request->receivers_offset == 0U &&
- plat_ffa_memory_handle_allocated_by_current_world(
+ ffa_memory_is_handle_allocated_by_current_world(
request->handle);
}
}
@@ -3738,7 +3740,7 @@
}
if ((vm_is_mailbox_other_world_owned(to_locked) &&
- !plat_ffa_acquire_receiver_rx(to_locked, &ret)) ||
+ !ffa_setup_acquire_receiver_rx(to_locked, &ret)) ||
vm_is_mailbox_busy(to_locked)) {
/*
* Can't retrieve memory information if the mailbox is
@@ -3779,7 +3781,7 @@
retrieve_request = retrieve_msg;
- if (plat_ffa_memory_handle_allocated_by_current_world(
+ if (ffa_memory_is_handle_allocated_by_current_world(
retrieve_request->handle)) {
ret = ffa_memory_retrieve(to_locked, retrieve_request, length,
&api_page_pool);
@@ -3938,7 +3940,7 @@
struct vm *to = current->vm;
struct ffa_value ret;
- if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
+ if (ffa_memory_is_handle_allocated_by_current_world(handle)) {
struct vm_locked to_locked = vm_lock(to);
ret = ffa_memory_reclaim(to_locked, handle, flags,
@@ -3946,8 +3948,8 @@
vm_unlock(&to_locked);
} else {
- ret = plat_ffa_other_world_mem_reclaim(to, handle, flags,
- &api_page_pool);
+ ret = ffa_memory_other_world_mem_reclaim(to, handle, flags,
+ &api_page_pool);
}
return ret;
@@ -4073,7 +4075,7 @@
* We can tell from the handle whether the memory transaction is for the
* other world or not.
*/
- if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
+ if (ffa_memory_is_handle_allocated_by_current_world(handle)) {
struct vm_locked from_locked = vm_lock(from);
ret = ffa_memory_send_continue(from_locked, fragment_copy,
@@ -4085,7 +4087,7 @@
*/
vm_unlock(&from_locked);
} else {
- ret = plat_ffa_other_world_mem_send_continue(
+ ret = ffa_memory_other_world_mem_send_continue(
from, fragment_copy, fragment_length, handle,
&api_page_pool);
}
@@ -4107,7 +4109,7 @@
* Reject if interface is not supported at this FF-A instance
* (DEN0077A FF-A v1.1 Beta0 Table 18.29) or the VM is UP.
*/
- if (!plat_ffa_is_secondary_ep_register_supported() ||
+ if (!ffa_setup_is_secondary_ep_register_supported() ||
vm_is_up(current->vm)) {
return ffa_error(FFA_NOT_SUPPORTED);
}
@@ -4146,7 +4148,7 @@
struct vcpu *current)
{
const struct ffa_value ret =
- plat_ffa_is_notifications_bitmap_access_valid(current, vm_id);
+ ffa_notifications_is_bitmap_access_valid(current, vm_id);
if (ffa_func_id(ret) != FFA_SUCCESS_32) {
dlog_verbose(
@@ -4156,14 +4158,14 @@
return ret;
}
- return plat_ffa_notifications_bitmap_create(vm_id, vcpu_count);
+ return ffa_notifications_bitmap_create(vm_id, vcpu_count);
}
struct ffa_value api_ffa_notification_bitmap_destroy(ffa_id_t vm_id,
struct vcpu *current)
{
const struct ffa_value ret =
- plat_ffa_is_notifications_bitmap_access_valid(current, vm_id);
+ ffa_notifications_is_bitmap_access_valid(current, vm_id);
if (ffa_func_id(ret) != FFA_SUCCESS_32) {
dlog_verbose(
@@ -4173,7 +4175,7 @@
return ret;
}
- return plat_ffa_notifications_bitmap_destroy(vm_id);
+ return ffa_notifications_bitmap_destroy(vm_id);
}
struct ffa_value api_ffa_notification_update_bindings(
@@ -4194,13 +4196,13 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (!plat_ffa_is_notifications_bind_valid(current, sender_vm_id,
- receiver_vm_id)) {
+ if (!ffa_notifications_is_bind_valid(current, sender_vm_id,
+ receiver_vm_id)) {
dlog_verbose("Invalid use of notifications bind interface.\n");
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (plat_ffa_notifications_update_bindings_forward(
+ if (ffa_notifications_update_bindings_forward(
receiver_vm_id, sender_vm_id, flags, notifications, is_bind,
&ret)) {
return ret;
@@ -4214,9 +4216,9 @@
/**
* This check assumes receiver is the current VM, and has been enforced
- * by 'plat_ffa_is_notifications_bind_valid'.
+ * by 'ffa_notifications_is_bind_valid'.
*/
- receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
+ receiver_locked = ffa_vm_find_locked(receiver_vm_id);
if (receiver_locked.vm == NULL) {
dlog_verbose("Receiver doesn't exist!\n");
@@ -4312,8 +4314,8 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (!plat_ffa_is_notification_set_valid(current, sender_vm_id,
- receiver_vm_id)) {
+ if (!ffa_notifications_is_set_valid(current, sender_vm_id,
+ receiver_vm_id)) {
dlog_verbose("Invalid use of notifications set interface.\n");
return ffa_error(FFA_INVALID_PARAMETERS);
}
@@ -4334,16 +4336,16 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (plat_ffa_notification_set_forward(sender_vm_id, receiver_vm_id,
- flags, notifications, &ret)) {
+ if (ffa_notifications_set_forward(sender_vm_id, receiver_vm_id, flags,
+ notifications, &ret)) {
return ret;
}
/*
* This check assumes receiver is the current VM, and has been enforced
- * by 'plat_ffa_is_notification_set_valid'.
+ * by 'ffa_notifications_is_set_valid'.
*/
- receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
+ receiver_locked = ffa_vm_find_locked(receiver_vm_id);
if (receiver_locked.vm == NULL) {
dlog_verbose("Receiver ID is not valid.\n");
@@ -4398,9 +4400,9 @@
if (!delay_sri) {
dlog_verbose("SRI was NOT delayed. vcpu: %u!\n",
vcpu_index(current));
- plat_ffa_sri_trigger_not_delayed(current->cpu);
+ ffa_notifications_sri_trigger_not_delayed(current->cpu);
} else {
- plat_ffa_sri_set_delayed(current->cpu);
+ ffa_notifications_sri_set_delayed(current->cpu);
}
ret = (struct ffa_value){.func = FFA_SUCCESS_32};
@@ -4454,8 +4456,7 @@
* depending on whether Hafnium is SPMC or hypervisor. On the
* rest of the function it is assumed this condition is met.
*/
- if (!plat_ffa_is_notification_get_valid(current, receiver_vm_id,
- flags)) {
+ if (!ffa_notifications_is_get_valid(current, receiver_vm_id, flags)) {
dlog_verbose("Invalid use of notifications get interface.\n");
return ffa_error(FFA_INVALID_PARAMETERS);
}
@@ -4464,7 +4465,7 @@
* This check assumes receiver is the current VM, and has been enforced
* by `plat_ffa_is_notifications_get_valid`.
*/
- receiver_locked = plat_ffa_vm_find_locked(receiver_vm_id);
+ receiver_locked = ffa_vm_find_locked(receiver_vm_id);
/*
* `plat_ffa_is_notifications_get_valid` ensures following is never
@@ -4483,8 +4484,8 @@
}
if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_SP) != 0U) {
- ret = plat_ffa_notifications_get_from_sp(
- receiver_locked, vcpu_id, &sp_notifications);
+ ret = ffa_notifications_get_from_sp(receiver_locked, vcpu_id,
+ &sp_notifications);
if (ret.func == FFA_ERROR_32) {
dlog_verbose("Failed to get notifications from sps.");
goto out;
@@ -4498,7 +4499,7 @@
if ((flags & FFA_NOTIFICATION_FLAG_BITMAP_HYP) != 0U ||
(flags & FFA_NOTIFICATION_FLAG_BITMAP_SPM) != 0U) {
- ret = plat_ffa_notifications_get_framework_notifications(
+ ret = ffa_notifications_get_framework_notifications(
receiver_locked, &framework_notifications, flags,
vcpu_id);
if (ret.func == FFA_ERROR_32) {
@@ -4596,9 +4597,9 @@
* Forward call to the other world, and fill the arrays used to assemble
* return.
*/
- plat_ffa_notification_info_get_forward(
- ids, &ids_count, lists_sizes, &lists_count,
- FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
+ ffa_notifications_info_get_forward(ids, &ids_count, lists_sizes,
+ &lists_count,
+ FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
list_is_full = ids_count == FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
@@ -4616,7 +4617,7 @@
if (!list_is_full) {
/* Grab notifications info from other world */
- plat_ffa_vm_notifications_info_get(
+ ffa_vm_notifications_info_get(
ids, &ids_count, lists_sizes, &lists_count,
FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
}
@@ -4640,7 +4641,7 @@
bool mode_ret = false;
uint32_t mode = 0;
- if (!plat_ffa_is_mem_perm_get_valid(current)) {
+ if (!ffa_memory_is_mem_perm_get_valid(current)) {
return ffa_error(FFA_DENIED);
}
@@ -4705,7 +4706,7 @@
uint32_t new_mode;
struct mpool local_page_pool;
- if (!plat_ffa_is_mem_perm_set_valid(current)) {
+ if (!ffa_memory_is_mem_perm_set_valid(current)) {
return ffa_error(FFA_DENIED);
}
diff --git a/src/arch/aarch64/hypervisor/ffa.c b/src/arch/aarch64/hypervisor/ffa.c
index 880a5e86..cf44ee3 100644
--- a/src/arch/aarch64/hypervisor/ffa.c
+++ b/src/arch/aarch64/hypervisor/ffa.c
@@ -32,7 +32,7 @@
*/
void arch_ffa_init(void)
{
- struct ffa_value ret = plat_ffa_spmc_id_get();
+ struct ffa_value ret = ffa_setup_spmc_id_get();
if (ret.func == FFA_SUCCESS_32) {
spmc_id = ret.arg2;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index d501eac..5b1021c 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -548,7 +548,7 @@
*args = api_yield(current, next, args);
return true;
case FFA_MSG_SEND_32:
- *args = plat_ffa_msg_send(
+ *args = ffa_indirect_msg_send(
ffa_sender(*args), ffa_receiver(*args),
ffa_msg_send_size(*args), current, next);
return true;
@@ -564,7 +564,7 @@
struct vcpu_locked current_locked;
current_locked = vcpu_lock(current);
- *args = plat_ffa_msg_recv(false, current_locked, next);
+ *args = ffa_indirect_msg_recv(false, current_locked, next);
vcpu_unlock(&current_locked);
return true;
}
@@ -697,7 +697,7 @@
return true;
}
- plat_ffa_handle_secure_interrupt(current, next);
+ ffa_interrupts_handle_secure_interrupt(current, next);
/*
* If the next vCPU belongs to an SP, the next time the NWd
@@ -778,7 +778,7 @@
*/
if ((*next != NULL && (*next)->vm->id == HF_OTHER_WORLD_ID) ||
(*next == NULL && vcpu->vm->id == HF_OTHER_WORLD_ID)) {
- plat_ffa_sri_trigger_if_delayed(vcpu->cpu);
+ ffa_notifications_sri_trigger_if_delayed(vcpu->cpu);
}
#endif
if (func != FFA_VERSION_32) {
@@ -1032,12 +1032,12 @@
switch (args.func) {
#if SECURE_WORLD == 1
case HF_INTERRUPT_DEACTIVATE:
- vcpu->regs.r[0] = plat_ffa_interrupt_deactivate(
- args.arg1, args.arg2, vcpu);
+ vcpu->regs.r[0] =
+ ffa_interrupts_deactivate(args.arg1, args.arg2, vcpu);
break;
case HF_INTERRUPT_RECONFIGURE:
- vcpu->regs.r[0] = plat_ffa_interrupt_reconfigure(
+ vcpu->regs.r[0] = ffa_interrupts_reconfigure(
args.arg1, args.arg2, args.arg3, vcpu);
break;
@@ -1054,7 +1054,7 @@
struct vcpu_locked current_locked;
current_locked = vcpu_lock(vcpu);
- vcpu->regs.r[0] = plat_ffa_interrupt_get(current_locked);
+ vcpu->regs.r[0] = ffa_interrupts_get(current_locked);
vcpu_unlock(&current_locked);
break;
}
@@ -1077,7 +1077,7 @@
#if SECURE_WORLD == 1
struct vcpu *next = NULL;
- plat_ffa_handle_secure_interrupt(current(), &next);
+ ffa_interrupts_handle_secure_interrupt(current(), &next);
/*
* Since we are in interrupt context, set the bit for the
@@ -1140,7 +1140,7 @@
*/
assert(current_vcpu->vm->ns_interrupts_action != NS_ACTION_QUEUED);
- if (plat_ffa_vm_managed_exit_supported(current_vcpu->vm)) {
+ if (ffa_vm_managed_exit_supported(current_vcpu->vm)) {
uint8_t pmr = plat_interrupts_get_priority_mask();
/*
@@ -1177,7 +1177,7 @@
* Unwind Normal World Scheduled Call chain in response to NS
* Interrupt.
*/
- return plat_ffa_unwind_nwd_call_chain_interrupt(current_vcpu);
+ return ffa_interrupts_unwind_nwd_call_chain(current_vcpu);
#else
return irq_lower();
#endif
diff --git a/src/arch/aarch64/plat/psci/hypervisor.c b/src/arch/aarch64/plat/psci/hypervisor.c
index b7506d6..deae71c 100644
--- a/src/arch/aarch64/plat/psci/hypervisor.c
+++ b/src/arch/aarch64/plat/psci/hypervisor.c
@@ -70,7 +70,7 @@
/* Reset the registers to give a clean start for vCPU. */
arch_regs_reset(vcpu);
- /* TODO: call plat_ffa_sri_init? */
+ /* TODO: call ffa_notifications_sri_init? */
return vcpu;
}
diff --git a/src/arch/aarch64/plat/psci/spmc.c b/src/arch/aarch64/plat/psci/spmc.c
index bef8b95..6b5132f 100644
--- a/src/arch/aarch64/plat/psci/spmc.c
+++ b/src/arch/aarch64/plat/psci/spmc.c
@@ -72,7 +72,7 @@
arch_cpu_init(c);
/* Initialize SRI for running core. */
- plat_ffa_sri_init(c);
+ ffa_notifications_sri_init(c);
vcpu = vm_get_vcpu(vm, vm_is_up(vm) ? 0 : cpu_index(c));
vcpu_locked = vcpu_lock(vcpu);
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index 555bcf0..88aef4a 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -22,8 +22,8 @@
{
}
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
- uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+ uint32_t share_func, bool multiple_borrower)
{
(void)share_func;
(void)receiver;
@@ -33,9 +33,9 @@
return true;
}
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -44,8 +44,9 @@
return true;
}
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
- struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+ struct vm *receiver_vm,
+ uint32_t func)
{
(void)sender_vm;
(void)receiver_vm;
@@ -54,9 +55,9 @@
return false;
}
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -65,8 +66,8 @@
return true;
}
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *ret)
{
(void)vm_id;
(void)vcpu_idx;
@@ -75,19 +76,19 @@
return false;
}
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
(void)to_destroy_locked;
}
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
- struct ffa_value args,
- struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+ struct ffa_value args,
+ struct ffa_value *ret)
{
(void)receiver_vm_id;
(void)args;
@@ -95,8 +96,8 @@
return false;
}
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
- struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+ struct ffa_value *ret)
{
(void)vm_locked;
(void)ret;
@@ -104,8 +105,8 @@
return false;
}
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
- struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+ struct ffa_value *ret)
{
(void)to_locked;
(void)ret;
@@ -113,8 +114,8 @@
return false;
}
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
- struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+ struct vm_locked receiver_locked)
{
(void)sender_locked;
(void)receiver_locked;
@@ -122,8 +123,9 @@
return false;
}
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
- struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+ ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
(void)receiver_vm_id;
(void)sender_vm_id;
@@ -132,14 +134,13 @@
return false;
}
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
{
return 0U;
}
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -147,7 +148,7 @@
return false;
}
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
ffa_id_t receiver_id, ffa_id_t sender_id,
ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
bool is_bind, struct ffa_value *ret)
@@ -163,12 +164,12 @@
return false;
}
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
ffa_id_t caller_id, const struct vm *target)
{
(void)caller_id;
@@ -176,7 +177,7 @@
return 0;
}
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
{
(void)vm;
return false;
@@ -186,8 +187,8 @@
* Check validity of the calls:
* FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
*/
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
- struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+ ffa_id_t vm_id)
{
/*
* Call should only be used by the Hypervisor, so any attempt of
@@ -199,9 +200,8 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -209,9 +209,8 @@
return false;
}
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id,
- ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+ ffa_notification_flags_t flags)
{
(void)flags;
(void)current;
@@ -219,7 +218,7 @@
return false;
}
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
ffa_notifications_bitmap_t *from_sp) // NOLINT
{
@@ -230,7 +229,7 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
struct vm_locked receiver_locked,
ffa_notifications_bitmap_t *from_fwk, // NOLINT
ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
@@ -243,11 +242,11 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id,
- ffa_notification_flags_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id,
+ ffa_notification_flags_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
{
(void)sender_vm_id;
(void)receiver_vm_id;
@@ -258,8 +257,8 @@
return false;
}
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -267,26 +266,26 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-bool plat_ffa_vm_notifications_info_get( // NOLINTNEXTLINE
+bool ffa_vm_notifications_info_get( // NOLINTNEXTLINE
uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
uint32_t *lists_sizes, // NOLINTNEXTLINE
uint32_t *lists_count, const uint32_t ids_count_max)
@@ -300,13 +299,13 @@
return false;
}
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
(void)current;
return false;
}
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
(void)current;
return false;
@@ -315,9 +314,9 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
(void)current_locked;
(void)target_vm_id;
@@ -327,8 +326,8 @@
return true;
}
-void plat_ffa_notification_info_get_forward( // NOLINTNEXTLINE
- uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
+void ffa_notifications_info_get_forward( // NOLINTNEXTLINE
+ uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
uint32_t *lists_sizes, uint32_t *lists_count,
const uint32_t ids_count_max)
{
@@ -339,22 +338,22 @@
(void)ids_count_max;
}
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
{
(void)cpu;
}
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
{
(void)cpu;
}
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
{
(void)cpu;
}
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
@@ -381,9 +380,9 @@
return true;
}
-ffa_vm_count_t plat_ffa_partition_info_get_forward( // NOLINTNEXTLINE
- const struct ffa_uuid *uuid, // NOLINTNEXTLINE
- uint32_t flags, // NOLINTNEXTLINE
+ffa_vm_count_t ffa_setup_partition_info_get_forward( // NOLINTNEXTLINE
+ const struct ffa_uuid *uuid, // NOLINTNEXTLINE
+ uint32_t flags, // NOLINTNEXTLINE
struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
{
(void)uuid;
@@ -393,13 +392,13 @@
return vm_count;
}
-bool plat_ffa_is_secondary_ep_register_supported(void)
+bool ffa_setup_is_secondary_ep_register_supported(void)
{
return false;
}
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
(void)current_locked;
(void)next;
@@ -407,12 +406,11 @@
return (struct ffa_value){.func = FFA_INTERRUPT_32};
}
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_id_t vm_id,
- ffa_id_t receiver_vm_id,
- struct vcpu_locked receiver_locked,
- uint32_t func, // NOLINTNEXTLINE
- enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+ struct vcpu_locked current_locked, ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+ uint32_t func, // NOLINTNEXTLINE
+ enum vcpu_state *next_state)
{
/* Perform state transition checks only for Secure Partitions. */
(void)current_locked;
@@ -425,15 +423,15 @@
return true;
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
- struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+ struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
/* Scheduling mode not supported in the Hypervisor/VMs. */
(void)current_locked;
(void)target_locked;
}
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
@@ -443,7 +441,7 @@
(void)sender_vm_id;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
/* Calls chains not supported in the Hypervisor/VMs. */
@@ -457,14 +455,14 @@
return false;
}
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
- struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+ struct vm_locked vm_locked)
{
(void)current_locked;
(void)vm_locked;
}
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
struct vm *from, uint32_t share_func,
struct ffa_memory_region **memory_region, uint32_t length,
uint32_t fragment_length, struct mpool *page_pool)
@@ -479,7 +477,7 @@
return (struct ffa_value){0};
}
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
struct vm *to, ffa_memory_handle_t handle,
ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
@@ -491,7 +489,7 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
struct vm *from, void *fragment, uint32_t fragment_length,
ffa_memory_handle_t handle, struct mpool *page_pool)
{
@@ -504,9 +502,9 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t size,
- struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
+ struct vcpu *current, struct vcpu **next)
{
(void)sender_vm_id;
(void)receiver_vm_id;
@@ -517,10 +515,10 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
- struct vcpu **next,
- uint32_t timeout_low,
- uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+ struct vcpu **next,
+ uint32_t timeout_low,
+ uint32_t timeout_high)
{
(void)current_locked;
(void)next;
@@ -598,7 +596,7 @@
return true;
}
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
ffa_memory_attributes_t attributes, uint32_t mode)
{
(void)mode;
@@ -616,12 +614,12 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
return false;
}
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
{
(void)vm_locked;
}
@@ -642,7 +640,7 @@
return true;
}
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
(void)current_locked;
diff --git a/src/ffa/BUILD.gn b/src/ffa/BUILD.gn
index b43fba4..3b077bf 100644
--- a/src/ffa/BUILD.gn
+++ b/src/ffa/BUILD.gn
@@ -30,7 +30,6 @@
"hypervisor/indirect_messaging.c",
"hypervisor/interrupts.c",
"hypervisor/notifications.c",
- "hypervisor/power_management.c",
"hypervisor/setup_and_discovery.c",
"hypervisor/vm.c",
]
@@ -53,7 +52,6 @@
"spmc/indirect_messaging.c",
"spmc/interrupts.c",
"spmc/notifications.c",
- "spmc/power_management.c",
"spmc/setup_and_discovery.c",
"spmc/vm.c",
]
diff --git a/src/ffa/absent.c b/src/ffa/absent.c
index 9628b17..0cd9e1d 100644
--- a/src/ffa/absent.c
+++ b/src/ffa/absent.c
@@ -12,13 +12,13 @@
#include "hf/vcpu.h"
#include "hf/vm.h"
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
{
return (struct ffa_value){.func = FFA_ERROR_32,
.arg2 = FFA_NOT_SUPPORTED};
}
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
ffa_id_t caller_id, const struct vm *target)
{
(void)caller_id;
@@ -43,7 +43,7 @@
/**
* Check validity of the FF-A memory send function attempt.
*/
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
+bool ffa_memory_is_send_valid(ffa_id_t receiver_vm_id, uint32_t share_func)
{
(void)receiver_vm_id;
(void)share_func;
@@ -54,9 +54,9 @@
/**
* Check validity of a FF-A direct message request.
*/
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -65,8 +65,9 @@
return false;
}
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
- struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+ struct vm *receiver_vm,
+ uint32_t func)
{
(void)sender_vm;
(void)receiver_vm;
@@ -78,9 +79,9 @@
/**
* Check validity of a FF-A direct message response.
*/
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
(void)current;
(void)sender_vm_id;
@@ -89,8 +90,8 @@
return false;
}
-bool plat_ffa_is_notifications_bitmap_access_valid(struct vcpu *current,
- ffa_id_t vm_id)
+bool ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+ ffa_id_t vm_id)
{
(void)current;
(void)vm_id;
@@ -98,24 +99,24 @@
return false;
}
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
(void)to_destroy_locked;
}
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
- struct ffa_value args,
- struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+ struct ffa_value args,
+ struct ffa_value *ret)
{
(void)receiver_vm_id;
(void)args;
@@ -124,8 +125,8 @@
return false;
}
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
- struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+ struct ffa_value *ret)
{
(void)vm_locked;
(void)ret;
@@ -133,8 +134,8 @@
return false;
}
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
- struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+ struct ffa_value *ret)
{
(void)to_locked;
(void)ret;
@@ -142,8 +143,8 @@
return false;
}
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
- struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+ struct vm_locked receiver_locked)
{
(void)sender_locked;
(void)receiver_locked;
@@ -151,8 +152,9 @@
return false;
}
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
- struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+ ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
(void)receiver_vm_id;
(void)sender_vm_id;
@@ -161,14 +163,13 @@
return false;
}
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
{
return 0U;
}
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -176,7 +177,7 @@
return false;
}
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
ffa_id_t receiver_id, ffa_id_t sender_id,
ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
bool is_bind, struct ffa_value *ret)
@@ -192,9 +193,8 @@
return false;
}
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
(void)current;
(void)sender_id;
@@ -202,9 +202,8 @@
return false;
}
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id,
- ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+ ffa_notification_flags_t flags)
{
(void)flags;
(void)current;
@@ -212,7 +211,7 @@
return false;
}
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
const ffa_notifications_bitmap_t *from_sp)
{
@@ -223,7 +222,7 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
struct vm_locked receiver_locked, // NOLINTNEXTLINE
ffa_notifications_bitmap_t *from_fwk, ffa_notification_flags_t flags)
{
@@ -234,11 +233,11 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id,
- ffa_notification_flags_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id,
+ ffa_notification_flags_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
{
(void)sender_vm_id;
(void)receiver_vm_id;
@@ -249,8 +248,8 @@
return false;
}
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -258,8 +257,8 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
- ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -267,27 +266,27 @@
return false;
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
{
(void)vm_id;
return (struct vm_locked){.vm = NULL};
}
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *ret)
{
(void)vm_id;
(void)vcpu_idx;
@@ -296,11 +295,11 @@
return false;
}
-bool plat_ffa_vm_notifications_info_get(const uint16_t *ids,
- const uint32_t *ids_count,
- const uint32_t *lists_sizes,
- const uint32_t *lists_count,
- const uint32_t ids_count_max)
+bool ffa_vm_notifications_info_get(const uint16_t *ids,
+ const uint32_t *ids_count,
+ const uint32_t *lists_sizes,
+ const uint32_t *lists_count,
+ const uint32_t ids_count_max)
{
(void)ids;
(void)ids_count;
@@ -311,13 +310,13 @@
return false;
}
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
(void)current;
return false;
}
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
(void)current;
return false;
@@ -326,9 +325,9 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
(void)current_locked;
(void)target_vm_id;
@@ -338,9 +337,9 @@
return true;
}
-struct ffa_value plat_ffa_handle_secure_interrupt(struct vcpu *current,
- struct vcpu **next,
- bool from_normal_world)
+struct ffa_value ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+ struct vcpu **next,
+ bool from_normal_world)
{
(void)current;
(void)next;
@@ -354,11 +353,11 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-void plat_ffa_notification_info_get_forward(const uint16_t *ids,
- const uint32_t *ids_count,
- const uint32_t *lists_sizes,
- const uint32_t *lists_count,
- const uint32_t ids_count_max)
+void ffa_notifications_info_get_forward(const uint16_t *ids,
+ const uint32_t *ids_count,
+ const uint32_t *lists_sizes,
+ const uint32_t *lists_count,
+ const uint32_t ids_count_max)
{
(void)ids;
(void)ids_count;
@@ -367,11 +366,11 @@
(void)ids_count_max;
}
-void plat_ffa_sri_init(void)
+void ffa_notifications_sri_init(void)
{
}
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
@@ -382,9 +381,9 @@
return false;
}
-bool plat_ffa_partition_info_get_forward(const struct ffa_uuid *uuid,
- const ffa_notification_flags_t flags,
- const ffa_vm_count_t *ret_count)
+bool ffa_setup_partition_info_get_forward(const struct ffa_uuid *uuid,
+ const ffa_notification_flags_t flags,
+ const ffa_vm_count_t *ret_count)
{
(void)uuid;
(void)flags;
@@ -410,11 +409,11 @@
return true;
}
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
- paddr_t fdt_addr,
- size_t fdt_allocated_size,
- const struct manifest_vm *manifest_vm,
- struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+ paddr_t fdt_addr,
+ size_t fdt_allocated_size,
+ const struct manifest_vm *manifest_vm,
+ struct mpool *ppool)
{
(void)stage1_locked;
(void)fdt_addr;
@@ -423,8 +422,8 @@
(void)ppool;
}
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
(void)current_locked;
(void)next;
@@ -432,12 +431,11 @@
return (struct ffa_value){.func = FFA_INTERRUPT_32};
}
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_id_t vm_id,
- ffa_id_t receiver_vm_id,
- struct vcpu_locked receiver_locked,
- uint32_t func, // NOLINTNEXTLINE
- enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+ struct vcpu_locked current_locked, ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+ uint32_t func, // NOLINTNEXTLINE
+ enum vcpu_state *next_state)
{
(void)current_locked;
(void)vm_id;
@@ -449,14 +447,14 @@
return true;
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
- struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+ struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
(void)current_locked;
(void)target_locked;
}
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
@@ -471,21 +469,21 @@
return false;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
(void)current_locked;
(void)next_locked;
}
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
- struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+ struct vm_locked vm_locked)
{
(void)current_locked;
(void)vm_locked;
}
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
struct vm *from, struct ffa_memory_region *memory_region,
uint32_t length, uint32_t fragment_length, struct mpool *page_pool)
{
@@ -498,7 +496,7 @@
return (struct ffa_value){.func = FFA_ERROR_32};
}
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
struct vm *to, ffa_memory_handle_t handle,
ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
@@ -510,7 +508,7 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
struct vm *from, void *fragment, uint32_t fragment_length,
ffa_memory_handle_t handle, struct mpool *page_pool)
{
@@ -523,9 +521,9 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t size,
- struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
+ struct vcpu *current, struct vcpu **next)
{
(void)sender_vm_id;
(void)receiver_vm_id;
@@ -536,10 +534,10 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
- struct vcpu **next,
- uint32_t timeout_low,
- uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+ struct vcpu **next,
+ uint32_t timeout_low,
+ uint32_t timeout_high)
{
(void)current_locked;
(void)next;
@@ -559,8 +557,8 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_msg_recv(bool block, struct vcpu *current,
- struct vcpu **next)
+struct ffa_value ffa_indirect_msg_recv(bool block, struct vcpu *current,
+ struct vcpu **next)
{
(void)block;
(void)current;
@@ -569,17 +567,17 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
return false;
}
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
return api_interrupt_get(current_locked);
}
diff --git a/src/ffa/hypervisor.c b/src/ffa/hypervisor.c
index 20c6fbe..d79d14b 100644
--- a/src/ffa/hypervisor.c
+++ b/src/ffa/hypervisor.c
@@ -94,7 +94,7 @@
* perspective and vice-versa.
*/
dlog_verbose("Setting up buffers for TEE.\n");
- plat_ffa_rxtx_map_spmc(
+ ffa_setup_rxtx_map_spmc(
pa_from_va(va_from_ptr(other_world_vm->mailbox.recv)),
pa_from_va(va_from_ptr(other_world_vm->mailbox.send)),
HF_MAILBOX_SIZE / FFA_PAGE_SIZE);
@@ -120,9 +120,9 @@
dlog_verbose("TEE finished setting up buffers.\n");
}
-bool plat_ffa_intercept_call(struct vcpu_locked current_locked,
- struct vcpu_locked next_locked,
- struct ffa_value *signal_interrupt)
+bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
+ struct vcpu_locked next_locked,
+ struct ffa_value *signal_interrupt)
{
(void)current_locked;
(void)next_locked;
@@ -200,9 +200,9 @@
* If the recipient's receive buffer is busy, it can optionally register the
* caller to be notified when the recipient's receive buffer becomes available.
*/
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t size,
- struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
+ struct vcpu *current, struct vcpu **next)
{
struct vm *from = current->vm;
struct vm *to;
diff --git a/src/ffa/hypervisor/cpu_cycles.c b/src/ffa/hypervisor/cpu_cycles.c
index 667e0e9..af54f19 100644
--- a/src/ffa/hypervisor/cpu_cycles.c
+++ b/src/ffa/hypervisor/cpu_cycles.c
@@ -12,8 +12,8 @@
#include "hf/ffa/indirect_messaging.h"
#include "hf/vcpu.h"
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *ret)
{
/*
* VM's requests should be forwarded to the SPMC, if target is an SP.
@@ -30,9 +30,9 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
(void)next;
(void)vcpu_idx;
@@ -56,18 +56,16 @@
* to be compliant with version v1.0 of the FF-A specification. It serves as
* a blocking call.
*/
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
- return plat_ffa_msg_recv(true, current_locked, next);
+ return ffa_indirect_msg_recv(true, current_locked, next);
}
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_id_t vm_id,
- ffa_id_t receiver_vm_id,
- struct vcpu_locked receiver_locked,
- uint32_t func,
- enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+ struct vcpu_locked current_locked, ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id, struct vcpu_locked receiver_locked,
+ uint32_t func, enum vcpu_state *next_state)
{
(void)current_locked;
(void)vm_id;
@@ -95,8 +93,8 @@
}
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
- struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+ struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
/* Scheduling mode not supported in the Hypervisor/VMs. */
(void)current_locked;
@@ -107,10 +105,10 @@
* Prepare to yield execution back to the VM that allocated cpu cycles and move
* to BLOCKED state.
*/
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
- struct vcpu **next,
- uint32_t timeout_low,
- uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+ struct vcpu **next,
+ uint32_t timeout_low,
+ uint32_t timeout_high)
{
struct vcpu *current = current_locked.vcpu;
struct ffa_value ret = {
diff --git a/src/ffa/hypervisor/direct_messaging.c b/src/ffa/hypervisor/direct_messaging.c
index 2b78f4b..c5b5292 100644
--- a/src/ffa/hypervisor/direct_messaging.c
+++ b/src/ffa/hypervisor/direct_messaging.c
@@ -18,9 +18,9 @@
/**
* Check validity of a FF-A direct message request.
*/
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -33,8 +33,9 @@
sender_vm_id == current_vm_id && vm_is_primary(current->vm);
}
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
- struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+ struct vm *receiver_vm,
+ uint32_t func)
{
(void)sender_vm;
(void)receiver_vm;
@@ -50,9 +51,9 @@
/**
* Check validity of a FF-A direct message response.
*/
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -65,9 +66,9 @@
receiver_vm_id == HF_PRIMARY_VM_ID;
}
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
- struct ffa_value args,
- struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+ struct ffa_value args,
+ struct ffa_value *ret)
{
if (!plat_ffa_is_tee_enabled()) {
dlog_verbose("Not forwarding: ffa_tee_enabled is false\n");
@@ -101,7 +102,7 @@
return true;
}
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
@@ -111,7 +112,7 @@
(void)sender_vm_id;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
/* Calls chains not supported in the Hypervisor/VMs. */
diff --git a/src/ffa/hypervisor/ffa_memory.c b/src/ffa/hypervisor/ffa_memory.c
index dfc5985..84cbcd4 100644
--- a/src/ffa/hypervisor/ffa_memory.c
+++ b/src/ffa/hypervisor/ffa_memory.c
@@ -18,7 +18,7 @@
#include "hypervisor.h"
#include "sysregs.h"
-enum ffa_memory_handle_allocator plat_ffa_memory_handle_allocator(void)
+enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
{
return FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;
}
@@ -37,8 +37,8 @@
/**
* Check validity of the FF-A memory send function attempt.
*/
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
- uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+ uint32_t share_func, bool multiple_borrower)
{
/*
* Currently memory interfaces are not forwarded from hypervisor to
@@ -54,18 +54,18 @@
return true;
}
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
{
return 0U;
}
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
(void)current;
return has_vhe_support();
}
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
(void)current;
return has_vhe_support();
@@ -263,7 +263,7 @@
return ret;
}
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
struct vm *from, uint32_t share_func,
struct ffa_memory_region **memory_region, uint32_t length,
uint32_t fragment_length, struct mpool *page_pool)
@@ -417,7 +417,7 @@
return ret;
}
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
struct vm *to, ffa_memory_handle_t handle,
ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
@@ -655,7 +655,7 @@
return ret;
}
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
struct vm *from, void *fragment, uint32_t fragment_length,
ffa_memory_handle_t handle, struct mpool *page_pool)
{
@@ -684,7 +684,7 @@
return ret;
}
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
ffa_memory_attributes_t attributes, uint32_t mode)
{
(void)mode;
diff --git a/src/ffa/hypervisor/indirect_messaging.c b/src/ffa/hypervisor/indirect_messaging.c
index 0fd2b6c..fb11f9f 100644
--- a/src/ffa/hypervisor/indirect_messaging.c
+++ b/src/ffa/hypervisor/indirect_messaging.c
@@ -14,8 +14,8 @@
#include "hf/ffa_internal.h"
#include "hf/vm.h"
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
- struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+ struct vm_locked receiver_locked)
{
(void)sender_locked;
(void)receiver_locked;
@@ -27,8 +27,9 @@
return true;
}
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
- struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+ ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
/* FFA_MSG_SEND2 is forwarded to SPMC when the receiver is an SP. */
if (vm_id_is_current_world(receiver_vm_id)) {
@@ -59,7 +60,7 @@
* Checks whether the vCPU's attempt to wait for a message has already been
* interrupted or whether it is allowed to block.
*/
-static bool plat_ffa_msg_recv_block_interrupted(
+static bool ffa_indirect_msg_recv_block_interrupted(
struct vcpu_locked current_locked)
{
bool interrupted;
@@ -98,9 +99,9 @@
*
* No new messages can be received until the mailbox has been cleared.
*/
-struct ffa_value plat_ffa_msg_recv(bool block,
- struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_indirect_msg_recv(bool block,
+ struct vcpu_locked current_locked,
+ struct vcpu **next)
{
struct vm *vm = current_locked.vcpu->vm;
struct vcpu *current = current_locked.vcpu;
@@ -143,7 +144,7 @@
* that time to FFA_SUCCESS.
*/
return_code = ffa_error(FFA_INTERRUPTED);
- if (plat_ffa_msg_recv_block_interrupted(current_locked)) {
+ if (ffa_indirect_msg_recv_block_interrupted(current_locked)) {
goto out;
}
diff --git a/src/ffa/hypervisor/interrupts.c b/src/ffa/hypervisor/interrupts.c
index 512cc57..21d89ed 100644
--- a/src/ffa/hypervisor/interrupts.c
+++ b/src/ffa/hypervisor/interrupts.c
@@ -10,7 +10,8 @@
#include "hf/check.h"
#include "hf/vm.h"
-void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next)
+void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+ struct vcpu **next)
{
(void)current;
(void)next;
@@ -22,7 +23,7 @@
CHECK(false);
}
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
@@ -36,8 +37,8 @@
/**
* Enable relevant virtual interrupts for VMs.
*/
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
- struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+ struct vm_locked vm_locked)
{
struct vcpu *current;
struct interrupts *interrupts;
@@ -51,7 +52,7 @@
}
}
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
return api_interrupt_get(current_locked);
}
diff --git a/src/ffa/hypervisor/notifications.c b/src/ffa/hypervisor/notifications.c
index 69ea010..f03f378 100644
--- a/src/ffa/hypervisor/notifications.c
+++ b/src/ffa/hypervisor/notifications.c
@@ -20,8 +20,8 @@
* Check validity of the calls:
* FFA_NOTIFICATION_BITMAP_CREATE/FFA_NOTIFICATION_BITMAP_DESTROY.
*/
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
- struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+ ffa_id_t vm_id)
{
/*
* Call should only be used by the Hypervisor, so any attempt of
@@ -33,16 +33,15 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
ffa_id_t current_vm_id = current->vm->id;
/** If Hafnium is hypervisor, receiver needs to be current vm. */
return sender_id != receiver_id && current_vm_id == receiver_id;
}
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
ffa_id_t receiver_id, ffa_id_t sender_id,
ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
bool is_bind, struct ffa_value *ret)
@@ -66,9 +65,8 @@
return false;
}
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -76,10 +74,10 @@
return sender_id == current_vm_id && sender_id != receiver_id;
}
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
{
/* Forward only if receiver is an SP. */
if (vm_id_is_current_world(receiver_vm_id)) {
@@ -103,9 +101,8 @@
return true;
}
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id,
- ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+ ffa_notification_flags_t flags)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -115,8 +112,8 @@
return (current_vm_id == receiver_id);
}
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -124,15 +121,15 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
(void)vm_id;
return ffa_error(FFA_NOT_SUPPORTED);
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
- ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
struct ffa_value ret;
@@ -155,10 +152,10 @@
return true;
}
-void plat_ffa_notification_info_get_forward(uint16_t *ids, uint32_t *ids_count,
- uint32_t *lists_sizes,
- uint32_t *lists_count,
- const uint32_t ids_count_max)
+void ffa_notifications_info_get_forward(uint16_t *ids, uint32_t *ids_count,
+ uint32_t *lists_sizes,
+ uint32_t *lists_count,
+ const uint32_t ids_count_max)
{
CHECK(ids != NULL);
CHECK(ids_count != NULL);
@@ -228,7 +225,7 @@
sizeof(ret.arg3) * FFA_NOTIFICATIONS_INFO_GET_REGS_RET);
}
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
ffa_notifications_bitmap_t *from_sp)
{
@@ -252,7 +249,7 @@
return ret;
}
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
@@ -292,12 +289,12 @@
* infrastructure that encompasses the NWd, and we are not interested in testing
* the flow of notifications between VMs only.
*/
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
{
(void)cpu;
}
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
{
(void)cpu;
}
@@ -306,7 +303,7 @@
* Track that in current CPU there was a notification set with delay SRI
* flag.
*/
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
{
(void)cpu;
}
diff --git a/src/ffa/hypervisor/power_management.c b/src/ffa/hypervisor/power_management.c
deleted file mode 100644
index e1e81b2..0000000
--- a/src/ffa/hypervisor/power_management.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2024 The Hafnium Authors.
- *
- * Use of this source code is governed by a BSD-style
- * license that can be found in the LICENSE file or at
- * https://opensource.org/licenses/BSD-3-Clause.
- */
-
-#include "hf/ffa/power_management.h"
-
-/**
- * Returns FFA_ERROR as FFA_SECONDARY_EP_REGISTER is not supported at the
- * non-secure FF-A instances.
- */
-bool plat_ffa_is_secondary_ep_register_supported(void)
-{
- return false;
-}
diff --git a/src/ffa/hypervisor/setup_and_discovery.c b/src/ffa/hypervisor/setup_and_discovery.c
index da9ebf7..74afa60 100644
--- a/src/ffa/hypervisor/setup_and_discovery.c
+++ b/src/ffa/hypervisor/setup_and_discovery.c
@@ -10,7 +10,7 @@
#include "hf/arch/other_world.h"
-#include "hf/ffa.h"
+#include "hf/check.h"
#include "hf/ffa/vm.h"
#include "hf/manifest.h"
#include "hf/vm.h"
@@ -18,7 +18,7 @@
#include "hypervisor.h"
#include "smc.h"
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
{
if (plat_ffa_is_tee_enabled()) {
/*
@@ -35,7 +35,16 @@
.arg2 = FFA_NOT_SUPPORTED};
}
-void plat_ffa_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
+/**
+ * Returns FFA_ERROR as FFA_SECONDARY_EP_REGISTER is not supported at the
+ * non-secure FF-A instances.
+ */
+bool ffa_setup_is_secondary_ep_register_supported(void)
+{
+ return false;
+}
+
+void ffa_setup_rxtx_map_spmc(paddr_t recv, paddr_t send, uint64_t page_count)
{
struct ffa_value ret;
@@ -46,7 +55,7 @@
CHECK(ret.func == FFA_SUCCESS_32);
}
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
struct vm *vm = vm_locked.vm;
struct vm *other_world;
@@ -56,7 +65,7 @@
return;
}
- if (!plat_ffa_vm_supports_indirect_messages(vm)) {
+ if (!ffa_vm_supports_indirect_messages(vm)) {
return;
}
@@ -71,14 +80,14 @@
vm->id, (uintptr_t)vm->mailbox.recv,
(uintptr_t)vm->mailbox.send);
- plat_ffa_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
+ ffa_setup_rxtx_map_spmc(pa_init(0), pa_init(0), 0);
vm_locked.vm->mailbox.state = MAILBOX_STATE_OTHER_WORLD_OWNED;
dlog_verbose("Mailbox of %x owned by SPMC.\n", vm_locked.vm->id);
}
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
struct ffa_value ret;
uint64_t func;
@@ -92,7 +101,7 @@
return;
}
- if (!plat_ffa_vm_supports_indirect_messages(vm_locked.vm)) {
+ if (!ffa_vm_supports_indirect_messages(vm_locked.vm)) {
return;
}
@@ -112,7 +121,7 @@
}
}
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
/*
* Allow forwarding from the Hypervisor if TEE or SPMC exists and
@@ -125,7 +134,7 @@
* Forward helper for FFA_PARTITION_INFO_GET.
* Emits FFA_PARTITION_INFO_GET from Hypervisor to SPMC if allowed.
*/
-ffa_vm_count_t plat_ffa_partition_info_get_forward(
+ffa_vm_count_t ffa_setup_partition_info_get_forward(
const struct ffa_uuid *uuid, uint32_t flags,
struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
{
@@ -185,12 +194,12 @@
return vm_count;
}
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
- paddr_t fdt_addr,
- size_t fdt_allocated_size,
- const struct manifest_vm *manifest_vm,
- const struct boot_params *boot_params,
- struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+ paddr_t fdt_addr,
+ size_t fdt_allocated_size,
+ const struct manifest_vm *manifest_vm,
+ const struct boot_params *boot_params,
+ struct mpool *ppool)
{
struct fdt partition_fdt;
@@ -214,7 +223,7 @@
pa_add(fdt_addr, fdt_allocated_size), ppool) == true);
}
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
ffa_id_t caller_id, const struct vm *target)
{
ffa_partition_properties_t result = target->messaging_method;
@@ -234,14 +243,14 @@
return result;
}
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
- struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+ struct ffa_value *ret)
{
struct vm *vm = vm_locked.vm;
ffa_id_t vm_id = vm->id;
if (!plat_ffa_is_tee_enabled() ||
- !plat_ffa_vm_supports_indirect_messages(vm)) {
+ !ffa_vm_supports_indirect_messages(vm)) {
return false;
}
@@ -274,8 +283,8 @@
*
* Returns true if the ownership belongs to the hypervisor.
*/
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
- struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+ struct ffa_value *ret)
{
struct ffa_value other_world_ret;
@@ -286,7 +295,7 @@
* - If the mailbox ownership hasn't been transferred to the SPMC.
*/
if (!plat_ffa_is_tee_enabled() ||
- !plat_ffa_vm_supports_indirect_messages(to_locked.vm) ||
+ !ffa_vm_supports_indirect_messages(to_locked.vm) ||
to_locked.vm->mailbox.state != MAILBOX_STATE_OTHER_WORLD_OWNED) {
return true;
}
diff --git a/src/ffa/hypervisor/vm.c b/src/ffa/hypervisor/vm.c
index 943ae66..08b1cff 100644
--- a/src/ffa/hypervisor/vm.c
+++ b/src/ffa/hypervisor/vm.c
@@ -8,20 +8,20 @@
#include "hf/vm.h"
-bool plat_ffa_vm_supports_indirect_messages(struct vm *vm)
+bool ffa_vm_supports_indirect_messages(struct vm *vm)
{
return vm->ffa_version >= FFA_VERSION_1_1 &&
vm_supports_messaging_method(vm, FFA_PARTITION_INDIRECT_MSG);
}
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
{
(void)vm;
return false;
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
{
if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
return vm_find_locked(vm_id);
@@ -30,12 +30,12 @@
return (struct vm_locked){.vm = NULL};
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
{
- return plat_ffa_vm_find_locked(vm_id);
+ return ffa_vm_find_locked(vm_id);
}
-bool plat_ffa_vm_notifications_info_get( // NOLINTNEXTLINE
+bool ffa_vm_notifications_info_get( // NOLINTNEXTLINE
uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
uint32_t *lists_sizes, // NOLINTNEXTLINE
uint32_t *lists_count, const uint32_t ids_count_max)
@@ -49,13 +49,13 @@
return false;
}
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
/* Hypervisor never frees VM structs. */
(void)to_destroy_locked;
}
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
{
(void)vm_locked;
}
diff --git a/src/ffa/spmc.c b/src/ffa/spmc.c
index ba59981..1b31a1a 100644
--- a/src/ffa/spmc.c
+++ b/src/ffa/spmc.c
@@ -38,7 +38,7 @@
void plat_ffa_init(struct mpool *ppool)
{
arch_ffa_init();
- plat_ffa_vm_init(ppool);
+ ffa_vm_init(ppool);
}
static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
@@ -252,12 +252,10 @@
* the current vcpu would transition upon the FF-A ABI invocation as determined
* by the Partition runtime model.
*/
-bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
- ffa_id_t vm_id,
- ffa_id_t receiver_vm_id,
- struct vcpu_locked locked_vcpu,
- uint32_t func,
- enum vcpu_state *next_state)
+bool ffa_cpu_cycles_check_runtime_state_transition(
+ struct vcpu_locked current_locked, ffa_id_t vm_id,
+ ffa_id_t receiver_vm_id, struct vcpu_locked locked_vcpu, uint32_t func,
+ enum vcpu_state *next_state)
{
bool allowed = false;
struct vcpu *current = current_locked.vcpu;
@@ -421,9 +419,9 @@
target_vcpu->requires_deactivate_call = false;
}
-bool plat_ffa_intercept_call(struct vcpu_locked current_locked,
- struct vcpu_locked next_locked,
- struct ffa_value *signal_interrupt)
+bool ffa_interrupts_intercept_call(struct vcpu_locked current_locked,
+ struct vcpu_locked next_locked,
+ struct ffa_value *signal_interrupt)
{
uint32_t intid;
@@ -462,7 +460,7 @@
* invocation of FFA_MSG_SEND_DIRECT_REQ or FFA_MSG_SEND_DIRECT_REQ2 (FF-A v1.2)
* ABI.
*/
-void plat_ffa_wind_call_chain_ffa_direct_req(
+void ffa_direct_msg_wind_call_chain_ffa_direct_req(
struct vcpu_locked current_locked,
struct vcpu_locked receiver_vcpu_locked, ffa_id_t sender_vm_id)
{
@@ -498,7 +496,7 @@
* we need to return other world's id so that the SPMC can
* return to the SPMD.
*/
-void plat_ffa_unwind_call_chain_ffa_direct_resp(
+void ffa_direct_msg_unwind_call_chain_ffa_direct_resp(
struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
struct vcpu *next = next_locked.vcpu;
@@ -521,9 +519,9 @@
}
}
-struct ffa_value plat_ffa_msg_send(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id, uint32_t size,
- struct vcpu *current, struct vcpu **next)
+struct ffa_value ffa_indirect_msg_send(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id, uint32_t size,
+ struct vcpu *current, struct vcpu **next)
{
(void)sender_vm_id;
(void)receiver_vm_id;
@@ -560,7 +558,7 @@
atomic_store_explicit(&current->vm->aborting, true,
memory_order_relaxed);
- plat_ffa_free_vm_resources(vm_locked);
+ ffa_vm_free_resources(vm_locked);
if (sp_boot_next(current_locked, next)) {
goto out;
diff --git a/src/ffa/spmc/cpu_cycles.c b/src/ffa/spmc/cpu_cycles.c
index 0d1a99f..db152db 100644
--- a/src/ffa/spmc/cpu_cycles.c
+++ b/src/ffa/spmc/cpu_cycles.c
@@ -18,8 +18,8 @@
void plat_ffa_vcpu_allow_interrupts(struct vcpu *current);
bool sp_boot_next(struct vcpu_locked current_locked, struct vcpu **next);
-bool plat_ffa_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *ret)
+bool ffa_cpu_cycles_run_forward(ffa_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *ret)
{
(void)vm_id;
(void)vcpu_idx;
@@ -31,9 +31,9 @@
/**
* Check if current VM can resume target VM using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu_locked current_locked,
- ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
- struct ffa_value *run_ret, struct vcpu **next)
+bool ffa_cpu_cycles_run_checks(struct vcpu_locked current_locked,
+ ffa_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
/*
* Under the Partition runtime model specified in FF-A v1.1-Beta0 spec,
@@ -299,8 +299,8 @@
* vCPU. Intercept call will set `ret` to FFA_INTERRUPT and the
* respective interrupt id.
*/
- if (plat_ffa_intercept_call(both_vcpu_locks.vcpu1,
- both_vcpu_locks.vcpu2, ffa_ret)) {
+ if (ffa_interrupts_intercept_call(both_vcpu_locks.vcpu1,
+ both_vcpu_locks.vcpu2, ffa_ret)) {
*next = NULL;
ret = true;
}
@@ -316,8 +316,8 @@
* from RUNNING to WAITING for the following Partition runtime models:
* RTM_FFA_RUN, RTM_SEC_INTERRUPT, RTM_SP_INIT.
*/
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
- struct vcpu **next)
+struct ffa_value ffa_cpu_cycles_msg_wait_prepare(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
struct ffa_value ret = api_ffa_interrupt_return(0);
struct vcpu *current = current_locked.vcpu;
@@ -417,8 +417,8 @@
* Initialize the scheduling mode and/or Partition Runtime model of the target
* SP upon being resumed by an FFA_RUN ABI.
*/
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
- struct vcpu_locked target_locked)
+void ffa_cpu_cycles_init_schedule_mode_ffa_run(
+ struct vcpu_locked current_locked, struct vcpu_locked target_locked)
{
struct vcpu *vcpu = target_locked.vcpu;
struct vcpu *current = current_locked.vcpu;
@@ -454,10 +454,10 @@
* execution context by the SPMC to handle secure virtual interrupt, then
* FFA_YIELD invocation is essentially a no-op.
*/
-struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
- struct vcpu **next,
- uint32_t timeout_low,
- uint32_t timeout_high)
+struct ffa_value ffa_cpu_cycles_yield_prepare(struct vcpu_locked current_locked,
+ struct vcpu **next,
+ uint32_t timeout_low,
+ uint32_t timeout_high)
{
struct ffa_value ret_args = (struct ffa_value){.func = FFA_SUCCESS_32};
struct vcpu *current = current_locked.vcpu;
diff --git a/src/ffa/spmc/direct_messaging.c b/src/ffa/spmc/direct_messaging.c
index a06594f..f76987b 100644
--- a/src/ffa/spmc/direct_messaging.c
+++ b/src/ffa/spmc/direct_messaging.c
@@ -11,9 +11,9 @@
#include "hf/ffa.h"
#include "hf/vm.h"
-bool plat_ffa_is_direct_request_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_request_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -36,8 +36,9 @@
* sender supports sending direct messaging requests, in accordance with their
* respective configurations at the partition's FF-A manifest.
*/
-bool plat_ffa_is_direct_request_supported(struct vm *sender_vm,
- struct vm *receiver_vm, uint32_t func)
+bool ffa_direct_msg_is_direct_request_supported(struct vm *sender_vm,
+ struct vm *receiver_vm,
+ uint32_t func)
{
uint16_t sender_method;
uint16_t receiver_method;
@@ -92,9 +93,9 @@
}
/** Check validity of a FF-A direct message response. */
-bool plat_ffa_is_direct_response_valid(struct vcpu *current,
- ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id)
+bool ffa_direct_msg_is_direct_response_valid(struct vcpu *current,
+ ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -107,9 +108,9 @@
vm_id_is_current_world(sender_vm_id);
}
-bool plat_ffa_direct_request_forward(ffa_id_t receiver_vm_id,
- struct ffa_value args,
- struct ffa_value *ret)
+bool ffa_direct_msg_direct_request_forward(ffa_id_t receiver_vm_id,
+ struct ffa_value args,
+ struct ffa_value *ret)
{
/*
* SPs are not supposed to issue requests to VMs.
diff --git a/src/ffa/spmc/ffa_memory.c b/src/ffa/spmc/ffa_memory.c
index 1532727..1043569 100644
--- a/src/ffa/spmc/ffa_memory.c
+++ b/src/ffa/spmc/ffa_memory.c
@@ -16,14 +16,14 @@
#include "sysregs.h"
-enum ffa_memory_handle_allocator plat_ffa_memory_handle_allocator(void)
+enum ffa_memory_handle_allocator ffa_memory_get_handle_allocator(void)
{
return FFA_MEMORY_HANDLE_ALLOCATOR_SPMC;
}
/** Check validity of the FF-A memory send function attempt. */
-bool plat_ffa_is_memory_send_valid(ffa_id_t receiver, ffa_id_t sender,
- uint32_t share_func, bool multiple_borrower)
+bool ffa_memory_is_send_valid(ffa_id_t receiver, ffa_id_t sender,
+ uint32_t share_func, bool multiple_borrower)
{
const bool is_receiver_sp = vm_id_is_current_world(receiver);
const bool is_sender_sp = vm_id_is_current_world(sender);
@@ -62,24 +62,24 @@
}
}
-uint32_t plat_ffa_other_world_mode(void)
+uint32_t ffa_memory_get_other_world_mode(void)
{
return MM_MODE_NS;
}
-bool plat_ffa_is_mem_perm_get_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_get_valid(const struct vcpu *current)
{
/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
}
-bool plat_ffa_is_mem_perm_set_valid(const struct vcpu *current)
+bool ffa_memory_is_mem_perm_set_valid(const struct vcpu *current)
{
/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
return has_vhe_support() && (current->rt_model == RTM_SP_INIT);
}
-struct ffa_value plat_ffa_other_world_mem_send(
+struct ffa_value ffa_memory_other_world_mem_send(
struct vm *from, uint32_t share_func,
struct ffa_memory_region **memory_region, uint32_t length,
uint32_t fragment_length, struct mpool *page_pool)
@@ -104,7 +104,7 @@
* SPMC handles its memory share requests internally, so no forwarding of the
* request is required.
*/
-struct ffa_value plat_ffa_other_world_mem_reclaim(
+struct ffa_value ffa_memory_other_world_mem_reclaim(
struct vm *to, ffa_memory_handle_t handle,
ffa_memory_region_flags_t flags, struct mpool *page_pool)
{
@@ -117,7 +117,7 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-struct ffa_value plat_ffa_other_world_mem_send_continue(
+struct ffa_value ffa_memory_other_world_mem_send_continue(
struct vm *from, void *fragment, uint32_t fragment_length,
ffa_memory_handle_t handle, struct mpool *page_pool)
{
@@ -134,7 +134,7 @@
* Update the memory region attributes with the security state bit based on the
* supplied mode.
*/
-ffa_memory_attributes_t plat_ffa_memory_add_security_bit_from_mode(
+ffa_memory_attributes_t ffa_memory_add_security_bit_from_mode(
ffa_memory_attributes_t attributes, uint32_t mode)
{
ffa_memory_attributes_t ret = attributes;
diff --git a/src/ffa/spmc/indirect_messaging.c b/src/ffa/spmc/indirect_messaging.c
index 5012424..0794b21 100644
--- a/src/ffa/spmc/indirect_messaging.c
+++ b/src/ffa/spmc/indirect_messaging.c
@@ -15,8 +15,8 @@
* to their configurations in the respective partition's FF-A manifest.
* Note: check is done at virtual FF-A instance only.
*/
-bool plat_ffa_is_indirect_msg_supported(struct vm_locked sender_locked,
- struct vm_locked receiver_locked)
+bool ffa_indirect_msg_is_supported(struct vm_locked sender_locked,
+ struct vm_locked receiver_locked)
{
struct vm *sender_vm = sender_locked.vm;
struct vm *receiver_vm = receiver_locked.vm;
@@ -60,8 +60,9 @@
return true;
}
-bool plat_ffa_msg_send2_forward(ffa_id_t receiver_vm_id, ffa_id_t sender_vm_id,
- struct ffa_value *ret)
+bool ffa_indirect_msg_send2_forward(ffa_id_t receiver_vm_id,
+ ffa_id_t sender_vm_id,
+ struct ffa_value *ret)
{
/* SPMC never needs to forward a FFA_MSG_SEND2, it always handles it. */
(void)receiver_vm_id;
diff --git a/src/ffa/spmc/interrupts.c b/src/ffa/spmc/interrupts.c
index 729c5cc..e06d20a 100644
--- a/src/ffa/spmc/interrupts.c
+++ b/src/ffa/spmc/interrupts.c
@@ -23,8 +23,8 @@
*
* Returns 0 on success, or -1 otherwise.
*/
-int64_t plat_ffa_interrupt_deactivate(uint32_t pint_id, uint32_t vint_id,
- struct vcpu *current)
+int64_t ffa_interrupts_deactivate(uint32_t pint_id, uint32_t vint_id,
+ struct vcpu *current)
{
struct vcpu_locked current_locked;
uint32_t int_id;
@@ -478,7 +478,8 @@
* execution is trapped into EL3. SPMD then routes the interrupt to SPMC
* through FFA_INTERRUPT_32 ABI synchronously using eret conduit.
*/
-void plat_ffa_handle_secure_interrupt(struct vcpu *current, struct vcpu **next)
+void ffa_interrupts_handle_secure_interrupt(struct vcpu *current,
+ struct vcpu **next)
{
struct vcpu *target_vcpu;
struct vcpu_locked target_vcpu_locked =
@@ -552,7 +553,7 @@
memory_order_relaxed)) {
/* Clear fields corresponding to secure interrupt handling. */
vcpu_secure_interrupt_complete(target_vcpu_locked);
- plat_ffa_disable_vm_interrupts(target_vm_locked);
+ ffa_vm_disable_interrupts(target_vm_locked);
/* Resume current vCPU. */
*next = NULL;
@@ -600,7 +601,7 @@
vm_unlock(&target_vm_locked);
}
-bool plat_ffa_inject_notification_pending_interrupt(
+bool ffa_interrupts_inject_notification_pending_interrupt(
struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
@@ -629,7 +630,7 @@
return ret;
}
-struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current_vcpu)
+struct vcpu *ffa_interrupts_unwind_nwd_call_chain(struct vcpu *current_vcpu)
{
struct vcpu *next;
struct two_vcpu_locked both_vcpu_locked;
@@ -702,7 +703,7 @@
interrupts = &current->interrupts;
vm = current->vm;
- if (plat_ffa_vm_managed_exit_supported(vm)) {
+ if (ffa_vm_managed_exit_supported(vm)) {
vcpu_virt_interrupt_set_enabled(interrupts,
HF_MANAGED_EXIT_INTID);
/*
@@ -734,8 +735,8 @@
* interface early during the boot stage as an S-EL0 SP need not call
* HF_INTERRUPT_ENABLE hypervisor ABI explicitly.
*/
-void plat_ffa_enable_virtual_interrupts(struct vcpu_locked current_locked,
- struct vm_locked vm_locked)
+void ffa_interrupts_enable_virtual_interrupts(struct vcpu_locked current_locked,
+ struct vm_locked vm_locked)
{
struct vcpu *current;
struct interrupts *interrupts;
@@ -773,8 +774,8 @@
* - Change the security state of the interrupt.
* - Enable or disable the physical interrupt.
*/
-int64_t plat_ffa_interrupt_reconfigure(uint32_t int_id, uint32_t command,
- uint32_t value, struct vcpu *current)
+int64_t ffa_interrupts_reconfigure(uint32_t int_id, uint32_t command,
+ uint32_t value, struct vcpu *current)
{
struct vm *vm = current->vm;
struct vm_locked vm_locked;
@@ -862,7 +863,7 @@
}
/* Returns the virtual interrupt id to be handled by SP. */
-uint32_t plat_ffa_interrupt_get(struct vcpu_locked current_locked)
+uint32_t ffa_interrupts_get(struct vcpu_locked current_locked)
{
uint32_t int_id;
diff --git a/src/ffa/spmc/notifications.c b/src/ffa/spmc/notifications.c
index 9d6b3d7..d27c76b 100644
--- a/src/ffa/spmc/notifications.c
+++ b/src/ffa/spmc/notifications.c
@@ -24,8 +24,8 @@
/** Interrupt priority for the Schedule Receiver Interrupt. */
#define SRI_PRIORITY UINT32_C(0xf0)
-struct ffa_value plat_ffa_is_notifications_bitmap_access_valid(
- struct vcpu *current, ffa_id_t vm_id)
+struct ffa_value ffa_notifications_is_bitmap_access_valid(struct vcpu *current,
+ ffa_id_t vm_id)
{
/**
* Create/Destroy interfaces to be called by the hypervisor, into the
@@ -52,9 +52,8 @@
* - If bind call from NWd, current VM ID must be same as Hypervisor ID,
* receiver's ID must be from NWd, and sender's ID from SWd.
*/
-bool plat_ffa_is_notifications_bind_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_bind_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -100,7 +99,7 @@
return true;
}
-bool plat_ffa_notifications_update_bindings_forward(
+bool ffa_notifications_update_bindings_forward(
ffa_id_t receiver_id, ffa_id_t sender_id,
ffa_notification_flags_t flags, ffa_notifications_bitmap_t bitmap,
bool is_bind, struct ffa_value *ret)
@@ -123,9 +122,8 @@
* - If set call from NWd, current VM ID must be same as Hypervisor ID,
* and receiver must be an SP.
*/
-bool plat_ffa_is_notification_set_valid(struct vcpu *current,
- ffa_id_t sender_id,
- ffa_id_t receiver_id)
+bool ffa_notifications_is_set_valid(struct vcpu *current, ffa_id_t sender_id,
+ ffa_id_t receiver_id)
{
ffa_id_t current_vm_id = current->vm->id;
@@ -170,11 +168,11 @@
return true;
}
-bool plat_ffa_notification_set_forward(ffa_id_t sender_vm_id,
- ffa_id_t receiver_vm_id,
- ffa_notification_flags_t flags,
- ffa_notifications_bitmap_t bitmap,
- struct ffa_value *ret)
+bool ffa_notifications_set_forward(ffa_id_t sender_vm_id,
+ ffa_id_t receiver_vm_id,
+ ffa_notification_flags_t flags,
+ ffa_notifications_bitmap_t bitmap,
+ struct ffa_value *ret)
{
(void)sender_vm_id;
(void)receiver_vm_id;
@@ -185,9 +183,8 @@
return false;
}
-bool plat_ffa_is_notification_get_valid(struct vcpu *current,
- ffa_id_t receiver_id,
- ffa_notification_flags_t flags)
+bool ffa_notifications_is_get_valid(struct vcpu *current, ffa_id_t receiver_id,
+ ffa_notification_flags_t flags)
{
ffa_id_t current_vm_id = current->vm->id;
/*
@@ -214,8 +211,8 @@
return caller_and_receiver_valid && flags_valid;
}
-void plat_ffa_notification_info_get_forward( // NOLINTNEXTLINE
- uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
+void ffa_notifications_info_get_forward( // NOLINTNEXTLINE
+ uint16_t *ids, uint32_t *ids_count, // NOLINTNEXTLINE
uint32_t *lists_sizes, uint32_t *lists_count,
const uint32_t ids_count_max)
{
@@ -226,8 +223,8 @@
(void)ids_count_max;
}
-struct ffa_value plat_ffa_notifications_bitmap_create(
- ffa_id_t vm_id, ffa_vcpu_count_t vcpu_count)
+struct ffa_value ffa_notifications_bitmap_create(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
struct vm_locked vm_locked;
@@ -254,7 +251,7 @@
vm_locked.vm->notifications.enabled = true;
} else {
/* Else should regard with NWd VM ID. */
- vm_locked = plat_ffa_nwd_vm_create(vm_id);
+ vm_locked = ffa_vm_nwd_create(vm_id);
/* If received NULL, there are no slots for VM creation. */
if (vm_locked.vm == NULL) {
@@ -279,8 +276,8 @@
return ret;
}
-bool plat_ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
- ffa_vcpu_count_t vcpu_count)
+bool ffa_notifications_bitmap_create_call(ffa_id_t vm_id,
+ ffa_vcpu_count_t vcpu_count)
{
(void)vm_id;
(void)vcpu_count;
@@ -288,10 +285,10 @@
return true;
}
-struct ffa_value plat_ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
+struct ffa_value ffa_notifications_bitmap_destroy(ffa_id_t vm_id)
{
struct ffa_value ret = {.func = FFA_SUCCESS_32};
- struct vm_locked to_destroy_locked = plat_ffa_vm_find_locked(vm_id);
+ struct vm_locked to_destroy_locked = ffa_vm_find_locked(vm_id);
if (to_destroy_locked.vm == NULL) {
dlog_verbose("Bitmap not created for VM: %u\n", vm_id);
@@ -315,7 +312,7 @@
vm_notifications_init(to_destroy_locked.vm,
to_destroy_locked.vm->vcpu_count, NULL);
if (vm_id != HF_OTHER_WORLD_ID) {
- plat_ffa_vm_destroy(to_destroy_locked);
+ ffa_vm_destroy(to_destroy_locked);
}
out:
@@ -324,7 +321,7 @@
return ret;
}
-struct ffa_value plat_ffa_notifications_get_from_sp(
+struct ffa_value ffa_notifications_get_from_sp(
struct vm_locked receiver_locked, ffa_vcpu_index_t vcpu_id,
ffa_notifications_bitmap_t *from_sp)
{
@@ -334,7 +331,7 @@
return (struct ffa_value){.func = FFA_SUCCESS_32};
}
-struct ffa_value plat_ffa_notifications_get_framework_notifications(
+struct ffa_value ffa_notifications_get_framework_notifications(
struct vm_locked receiver_locked, ffa_notifications_bitmap_t *from_fwk,
ffa_notification_flags_t flags, ffa_vcpu_index_t vcpu_id)
{
@@ -363,15 +360,16 @@
plat_interrupts_send_sgi(HF_SCHEDULE_RECEIVER_INTID, cpu, false);
}
-static void plat_ffa_sri_set_delayed_internal(struct cpu *cpu, bool delayed)
+static void ffa_notifications_sri_set_delayed_internal(struct cpu *cpu,
+ bool delayed)
{
assert(cpu != NULL);
cpu->is_sri_delayed = delayed;
}
-void plat_ffa_sri_set_delayed(struct cpu *cpu)
+void ffa_notifications_sri_set_delayed(struct cpu *cpu)
{
- plat_ffa_sri_set_delayed_internal(cpu, true);
+ ffa_notifications_sri_set_delayed_internal(cpu, true);
}
static bool plat_ffa_is_sri_delayed(struct cpu *cpu)
@@ -380,27 +378,27 @@
return cpu->is_sri_delayed;
}
-void plat_ffa_sri_trigger_if_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_if_delayed(struct cpu *cpu)
{
assert(cpu != NULL);
if (plat_ffa_is_sri_delayed(cpu)) {
plat_ffa_send_schedule_receiver_interrupt(cpu);
- plat_ffa_sri_set_delayed_internal(cpu, false);
+ ffa_notifications_sri_set_delayed_internal(cpu, false);
}
}
-void plat_ffa_sri_trigger_not_delayed(struct cpu *cpu)
+void ffa_notifications_sri_trigger_not_delayed(struct cpu *cpu)
{
/*
* If flag to delay SRI isn't set, trigger SRI such that the
* receiver scheduler is aware there are pending notifications.
*/
plat_ffa_send_schedule_receiver_interrupt(cpu);
- plat_ffa_sri_set_delayed_internal(cpu, false);
+ ffa_notifications_sri_set_delayed_internal(cpu, false);
}
-void plat_ffa_sri_init(struct cpu *cpu)
+void ffa_notifications_sri_init(struct cpu *cpu)
{
/* Configure as Non Secure SGI. */
struct interrupt_descriptor sri_desc = {
diff --git a/src/ffa/spmc/power_management.c b/src/ffa/spmc/power_management.c
deleted file mode 100644
index f543059..0000000
--- a/src/ffa/spmc/power_management.c
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright 2024 The Hafnium Authors.
- *
- * Use of this source code is governed by a BSD-style
- * license that can be found in the LICENSE file or at
- * https://opensource.org/licenses/BSD-3-Clause.
- */
-
-#include "hf/ffa/power_management.h"
-
-/**
- * Returns FFA_SUCCESS as FFA_SECONDARY_EP_REGISTER is supported at the
- * secure virtual FF-A instance.
- */
-bool plat_ffa_is_secondary_ep_register_supported(void)
-{
- return true;
-}
diff --git a/src/ffa/spmc/setup_and_discovery.c b/src/ffa/spmc/setup_and_discovery.c
index 3b22714..f0ad3de 100644
--- a/src/ffa/spmc/setup_and_discovery.c
+++ b/src/ffa/spmc/setup_and_discovery.c
@@ -8,13 +8,13 @@
#include "hf/ffa/setup_and_discovery.h"
-#include "hf/ffa.h"
+#include "hf/check.h"
#include "hf/manifest.h"
#include "hf/vm.h"
#include "smc.h"
-struct ffa_value plat_ffa_spmc_id_get(void)
+struct ffa_value ffa_setup_spmc_id_get(void)
{
/*
* Since we are running in the SPMC use FFA_ID_GET to fetch our
@@ -23,17 +23,26 @@
return smc_ffa_call((struct ffa_value){.func = FFA_ID_GET_32});
}
-void plat_ffa_rxtx_map_forward(struct vm_locked vm_locked)
+/**
+ * Returns FFA_SUCCESS as FFA_SECONDARY_EP_REGISTER is supported at the
+ * secure virtual FF-A instance.
+ */
+bool ffa_setup_is_secondary_ep_register_supported(void)
+{
+ return true;
+}
+
+void ffa_setup_rxtx_map_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-void plat_ffa_rxtx_unmap_forward(struct vm_locked vm_locked)
+void ffa_setup_rxtx_unmap_forward(struct vm_locked vm_locked)
{
(void)vm_locked;
}
-bool plat_ffa_partition_info_get_regs_forward_allowed(void)
+bool ffa_setup_partition_info_get_regs_forward_allowed(void)
{
/*
* Allow forwarding from the SPMC to SPMD unconditionally.
@@ -42,7 +51,7 @@
}
/** Forward helper for FFA_PARTITION_INFO_GET. */
-ffa_vm_count_t plat_ffa_partition_info_get_forward(
+ffa_vm_count_t ffa_setup_partition_info_get_forward(
const struct ffa_uuid *uuid, uint32_t flags,
struct ffa_partition_info *partitions, ffa_vm_count_t vm_count)
{
@@ -55,12 +64,12 @@
return vm_count;
}
-void plat_ffa_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
- paddr_t fdt_addr,
- size_t fdt_allocated_size,
- const struct manifest_vm *manifest_vm,
- const struct boot_params *boot_params,
- struct mpool *ppool)
+void ffa_setup_parse_partition_manifest(struct mm_stage1_locked stage1_locked,
+ paddr_t fdt_addr,
+ size_t fdt_allocated_size,
+ const struct manifest_vm *manifest_vm,
+ const struct boot_params *boot_params,
+ struct mpool *ppool)
{
(void)boot_params;
(void)stage1_locked;
@@ -72,7 +81,7 @@
CHECK(false);
}
-ffa_partition_properties_t plat_ffa_partition_properties(
+ffa_partition_properties_t ffa_setup_partition_properties(
ffa_id_t caller_id, const struct vm *target)
{
ffa_partition_properties_t result = target->messaging_method;
@@ -102,8 +111,8 @@
return result & final_mask;
}
-bool plat_ffa_rx_release_forward(struct vm_locked vm_locked,
- struct ffa_value *ret)
+bool ffa_setup_rx_release_forward(struct vm_locked vm_locked,
+ struct ffa_value *ret)
{
(void)vm_locked;
(void)ret;
@@ -111,8 +120,8 @@
return false;
}
-bool plat_ffa_acquire_receiver_rx(struct vm_locked to_locked,
- struct ffa_value *ret)
+bool ffa_setup_acquire_receiver_rx(struct vm_locked to_locked,
+ struct ffa_value *ret)
{
(void)to_locked;
(void)ret;
diff --git a/src/ffa/spmc/vm.c b/src/ffa/spmc/vm.c
index f999248..5ad2820 100644
--- a/src/ffa/spmc/vm.c
+++ b/src/ffa/spmc/vm.c
@@ -73,7 +73,7 @@
* If a VM with the ID already exists return it.
* Return NULL if it can't allocate a new VM.
*/
-struct vm_locked plat_ffa_nwd_vm_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id)
{
struct vm_locked vm_locked;
struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
@@ -106,7 +106,7 @@
return vm_locked;
}
-void plat_ffa_vm_destroy(struct vm_locked to_destroy_locked)
+void ffa_vm_destroy(struct vm_locked to_destroy_locked)
{
struct vm *vm = to_destroy_locked.vm;
/*
@@ -121,7 +121,7 @@
}
}
-void plat_ffa_vm_init(struct mpool *ppool)
+void ffa_vm_init(struct mpool *ppool)
{
struct vm *other_world = vm_find(HF_OTHER_WORLD_ID);
@@ -144,12 +144,12 @@
}
}
-bool plat_ffa_vm_managed_exit_supported(struct vm *vm)
+bool ffa_vm_managed_exit_supported(struct vm *vm)
{
return (vm->ns_interrupts_action == NS_ACTION_ME);
}
-struct vm_locked plat_ffa_vm_find_locked(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked(ffa_id_t vm_id)
{
struct vm_locked to_ret_locked;
@@ -166,19 +166,18 @@
return to_ret_locked;
}
-struct vm_locked plat_ffa_vm_find_locked_create(ffa_id_t vm_id)
+struct vm_locked ffa_vm_find_locked_create(ffa_id_t vm_id)
{
if (vm_id_is_current_world(vm_id) || vm_id == HF_OTHER_WORLD_ID) {
return vm_find_locked(vm_id);
}
- return plat_ffa_nwd_vm_create(vm_id);
+ return ffa_vm_nwd_create(vm_id);
}
-bool plat_ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
- uint32_t *lists_sizes,
- uint32_t *lists_count,
- const uint32_t ids_count_max)
+bool ffa_vm_notifications_info_get(uint16_t *ids, uint32_t *ids_count,
+ uint32_t *lists_sizes, uint32_t *lists_count,
+ const uint32_t ids_count_max)
{
struct nwd_vms_locked nwd_vms_locked = nwd_vms_lock();
struct vm_locked other_world_locked = vm_find_locked(HF_OTHER_WORLD_ID);
@@ -215,7 +214,7 @@
return list_full_and_more_pending;
}
-void plat_ffa_disable_vm_interrupts(struct vm_locked vm_locked)
+void ffa_vm_disable_interrupts(struct vm_locked vm_locked)
{
uint32_t core_pos = arch_find_core_pos();
@@ -237,10 +236,10 @@
/**
* Reclaim all resources belonging to VM in aborted state.
*/
-void plat_ffa_free_vm_resources(struct vm_locked vm_locked)
+void ffa_vm_free_resources(struct vm_locked vm_locked)
{
/*
* Gracefully disable all interrupts belonging to SP.
*/
- plat_ffa_disable_vm_interrupts(vm_locked);
+ ffa_vm_disable_interrupts(vm_locked);
}
diff --git a/src/ffa/spmc/vm.h b/src/ffa/spmc/vm.h
index 383d191..0f24fa5 100644
--- a/src/ffa/spmc/vm.h
+++ b/src/ffa/spmc/vm.h
@@ -10,8 +10,8 @@
#include "hf/vm.h"
-void plat_ffa_vm_init(struct mpool *ppool);
+void ffa_vm_init(struct mpool *ppool);
-struct vm_locked plat_ffa_nwd_vm_create(ffa_id_t vm_id);
+struct vm_locked ffa_vm_nwd_create(ffa_id_t vm_id);
-void plat_ffa_disable_vm_interrupts(struct vm_locked vm_locked);
+void ffa_vm_disable_interrupts(struct vm_locked vm_locked);
diff --git a/src/ffa_memory.c b/src/ffa_memory.c
index 43d6a2c..c8b6130 100644
--- a/src/ffa_memory.c
+++ b/src/ffa_memory.c
@@ -89,7 +89,7 @@
if (handle == FFA_MEMORY_HANDLE_INVALID) {
memory_region->handle =
- plat_ffa_memory_handle_make(i);
+ ffa_memory_make_handle(i);
} else {
memory_region->handle = handle;
}
@@ -146,7 +146,7 @@
* First look for a share_state allocated by us, in which case the
* handle is based on the index.
*/
- if (plat_ffa_memory_handle_allocated_by_current_world(handle)) {
+ if (ffa_memory_is_handle_allocated_by_current_world(handle)) {
uint64_t index = ffa_memory_handle_index(handle);
if (index < MAX_MEM_SHARES) {
@@ -388,8 +388,8 @@
}
/* Set the security state bit if necessary. */
- if ((default_mode & plat_ffa_other_world_mode()) != 0) {
- mode |= plat_ffa_other_world_mode();
+ if ((default_mode & ffa_memory_get_other_world_mode()) != 0) {
+ mode |= ffa_memory_get_other_world_mode();
}
mode |= default_mode & MM_MODE_D;
@@ -1012,7 +1012,7 @@
* allow the secure access from the SP.
*/
if (memory_protected) {
- *to_mode &= ~plat_ffa_other_world_mode();
+ *to_mode &= ~ffa_memory_get_other_world_mode();
}
}
@@ -1297,10 +1297,11 @@
*/
bool ret;
struct mm_stage1_locked stage1_locked = mm_lock_stage1();
- void *ptr = mm_identity_map(stage1_locked, begin, end,
- MM_MODE_W | (extra_mode_attributes &
- plat_ffa_other_world_mode()),
- ppool);
+ void *ptr =
+ mm_identity_map(stage1_locked, begin, end,
+ MM_MODE_W | (extra_mode_attributes &
+ ffa_memory_get_other_world_mode()),
+ ppool);
size_t size = pa_difference(begin, end);
if (!ptr) {
@@ -1571,9 +1572,10 @@
* perform a non-secure memory access. In such case `clean_mode` takes
* the same mode as `orig_from_mode`.
*/
- clean_mode = (memory_protected != NULL && *memory_protected)
- ? orig_from_mode & ~plat_ffa_other_world_mode()
- : orig_from_mode;
+ clean_mode =
+ (memory_protected != NULL && *memory_protected)
+ ? orig_from_mode & ~ffa_memory_get_other_world_mode()
+ : orig_from_mode;
/* Clear the memory so no VM or device can see the previous contents. */
if (clear && !ffa_clear_memory_constituents(
@@ -3380,7 +3382,7 @@
share_state->fragment_count;
/* VMs acquire the RX buffer from SPMC. */
- CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
+ CHECK(ffa_setup_acquire_receiver_rx(to_locked, &ret));
/*
* Copy response to RX buffer of caller and deliver the message.
@@ -3392,7 +3394,7 @@
* Set the security state in the memory retrieve response attributes
* if specified by the target mode.
*/
- attributes = plat_ffa_memory_add_security_bit_from_mode(
+ attributes = ffa_memory_add_security_bit_from_mode(
memory_region->attributes, retrieve_mode);
/*
@@ -3492,7 +3494,7 @@
share_state->hypervisor_fragment_count = 1;
/* VMs acquire the RX buffer from SPMC. */
- CHECK(plat_ffa_acquire_receiver_rx(to_locked, &ret));
+ CHECK(ffa_setup_acquire_receiver_rx(to_locked, &ret));
/*
* Copy response to RX buffer of caller and deliver the message.
@@ -3512,7 +3514,7 @@
* Set the security state in the memory retrieve response attributes
* if specified by the target mode.
*/
- attributes = plat_ffa_memory_add_security_bit_from_mode(
+ attributes = ffa_memory_add_security_bit_from_mode(
memory_region->attributes, share_state->sender_orig_mode);
receiver = ffa_memory_region_get_receiver(memory_region, 0);
@@ -3774,7 +3776,7 @@
* When Hafnium is the hypervisor, acquire the RX buffer of a VM that
* is currently owned by the SPMC.
*/
- assert(plat_ffa_acquire_receiver_rx(to_locked, &ret));
+ assert(ffa_setup_acquire_receiver_rx(to_locked, &ret));
remaining_constituent_count = ffa_memory_fragment_init(
(struct ffa_memory_region_constituent *)retrieve_continue_page,
diff --git a/src/hf_ipi.c b/src/hf_ipi.c
index 0964c13..568efde 100644
--- a/src/hf_ipi.c
+++ b/src/hf_ipi.c
@@ -85,7 +85,7 @@
switch (target_vcpu->state) {
case VCPU_STATE_WAITING:
- plat_ffa_sri_trigger_not_delayed(target_vcpu->cpu);
+ ffa_notifications_sri_trigger_not_delayed(target_vcpu->cpu);
return true;
case VCPU_STATE_RUNNING:
case VCPU_STATE_BLOCKED:
diff --git a/src/load.c b/src/load.c
index c3ae898..14692fc 100644
--- a/src/load.c
+++ b/src/load.c
@@ -260,7 +260,7 @@
vcpu_update_boot(vm_get_vcpu(vm_locked.vm, 0));
if (vm_locked_are_notifications_enabled(vm_locked) &&
- !plat_ffa_notifications_bitmap_create_call(
+ !ffa_notifications_bitmap_create_call(
vm_locked.vm->id, vm_locked.vm->vcpu_count)) {
return false;
}
@@ -711,7 +711,7 @@
}
if (manifest_vm->is_ffa_partition) {
- plat_ffa_parse_partition_manifest(
+ ffa_setup_parse_partition_manifest(
stage1_locked, fdt_addr, fdt_allocated_size,
manifest_vm, boot_params, ppool);
}
@@ -836,7 +836,8 @@
for (n = 0; n < manifest_vm->secondary.vcpu_count; n++) {
vcpu = vm_get_vcpu(vm, n);
vcpu_locked = vcpu_lock(vcpu);
- plat_ffa_enable_virtual_interrupts(vcpu_locked, vm_locked);
+ ffa_interrupts_enable_virtual_interrupts(vcpu_locked,
+ vm_locked);
vcpu_unlock(&vcpu_locked);
}