refactor(ffa): change the runtime states of an endpoint execution
context based on FF-A v1.1. Allow FFA_RUN from SP.
The various state enumerations defined in the specification, along
with a few auxiliary states needed for state transitions, such as
VCPU_STATE_OFF and VCPU_STATE_ABORTED, are described as follows:
/** The vCPU is switched off. */
VCPU_STATE_OFF,
/** The vCPU is currently running. */
VCPU_STATE_RUNNING,
/** The vCPU is waiting to be allocated CPU cycles to do work. */
VCPU_STATE_WAITING,
/** The vCPU is blocked and waiting for some work to complete
on its behalf. */
VCPU_STATE_BLOCKED,
/** The vCPU has been preempted by an interrupt. */
VCPU_STATE_PREEMPTED,
/** The vCPU is waiting for an interrupt. */
VCPU_STATE_BLOCKED_INTERRUPT,
/** The vCPU has aborted. */
VCPU_STATE_ABORTED,
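
For illustration, the transitions exercised by the hunks below map
onto the FF-A ABIs roughly as follows (a summary of this patch, not an
exhaustive reading of the spec):

FFA_MSG_WAIT / direct message response sent  -> VCPU_STATE_WAITING
FFA_YIELD / direct message request sent      -> VCPU_STATE_BLOCKED
vCPU preempted by an interrupt               -> VCPU_STATE_PREEMPTED
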
This patch also removes the constraint on FFA_RUN: as per the FF-A
v1.1 spec, any SP can invoke FFA_RUN. Subsequent patches leverage this
capability for secure interrupt signal completion.
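
As a minimal sketch of what the relaxed check permits (using the
ffa_run() wrapper from the vmapi headers; the variable names and the
scenario are illustrative, not part of this patch):

/* From the context of one SP, resume another SP's execution context. */
struct ffa_value ret = ffa_run(target_sp_id, target_vcpu_index);
/*
 * Per plat_ffa_run_checks() below, the SPMC denies the call only if
 * the target vCPU is OFF (and the caller is not the primary VM) or
 * the target is a normal-world VM.
 */
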
Change-Id: I3801b4a053df56a4b5a2803e74d4cbf743ad2678
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
diff --git a/src/api.c b/src/api.c
index 47cae90..d73f330 100644
--- a/src/api.c
+++ b/src/api.c
@@ -71,11 +71,13 @@
* If VM is UP then return first vCPU.
* If VM is MP then return vCPU whose index matches current CPU index.
*/
-static struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
+struct vcpu *api_ffa_get_vm_vcpu(struct vm *vm, struct vcpu *current)
{
ffa_vcpu_index_t current_cpu_index = cpu_index(current->cpu);
struct vcpu *vcpu = NULL;
+ CHECK((vm != NULL) && (current != NULL));
+
if (vm->vcpu_count == 1) {
vcpu = vm_get_vcpu(vm, 0);
} else if (current_cpu_index < vm->vcpu_count) {
@@ -123,9 +125,9 @@
* This triggers the scheduling logic to run. Run in the context of secondary VM
* to cause FFA_RUN to return and the primary VM to regain control of the CPU.
*/
-static struct vcpu *api_switch_to_primary(struct vcpu *current,
- struct ffa_value primary_ret,
- enum vcpu_state secondary_state)
+struct vcpu *api_switch_to_primary(struct vcpu *current,
+ struct ffa_value primary_ret,
+ enum vcpu_state secondary_state)
{
/*
* If the secondary is blocked but has a timer running, sleep until the
@@ -242,7 +244,7 @@
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
- return api_switch_to_primary(current, ret, VCPU_STATE_READY);
+ return api_switch_to_primary(current, ret, VCPU_STATE_PREEMPTED);
}
/**
@@ -280,9 +282,9 @@
}
/**
- * Returns to the primary VM to allow this CPU to be used for other tasks as the
- * vCPU does not have work to do at this moment. The current vCPU is marked as
- * ready to be scheduled again.
+ * The current vCPU is blocked on some resource and needs to relinquish
+ * control back to the execution context of the endpoint that originally
+ * allocated cycles to it.
*/
struct ffa_value api_yield(struct vcpu *current, struct vcpu **next)
{
@@ -309,7 +311,7 @@
(struct ffa_value){.func = FFA_YIELD_32,
.arg1 = ffa_vm_vcpu(current->vm->id,
vcpu_index(current))},
- VCPU_STATE_READY);
+ VCPU_STATE_BLOCKED);
return ret;
}
@@ -325,7 +327,7 @@
.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
vcpu_index(target_vcpu)),
};
- return api_switch_to_primary(current, ret, VCPU_STATE_READY);
+ return api_switch_to_primary(current, ret, VCPU_STATE_BLOCKED);
}
/**
@@ -600,6 +602,7 @@
struct vm_locked vm_locked;
bool need_vm_lock;
bool ret;
+ uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;
/*
* Check that the registers are available so that the vCPU can be run.
@@ -622,7 +625,7 @@
#endif
/* The VM needs to be locked to deliver mailbox messages. */
- need_vm_lock = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX;
+ need_vm_lock = vcpu->state == VCPU_STATE_WAITING;
if (need_vm_lock) {
vcpu_unlock(&vcpu_locked);
vm_locked = vm_lock(vcpu->vm);
@@ -670,7 +673,16 @@
ret = false;
goto out;
- case VCPU_STATE_BLOCKED_MAILBOX:
+ case VCPU_STATE_WAITING:
+ /*
+ * An initial FFA_RUN is necessary for a secondary VM/SP to reach
+ * its message wait loop.
+ */
+ if (!vcpu->is_bootstrapped) {
+ vcpu->is_bootstrapped = true;
+ break;
+ }
+
/*
* A pending message allows the vCPU to run so the message can
* be delivered directly.
@@ -681,15 +693,31 @@
vcpu->vm->mailbox.state = MAILBOX_STATE_READ;
break;
}
- /* Fall through. */
+
+ if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
+ break;
+ }
+
+ if (arch_timer_enabled(&vcpu->regs)) {
+ timer_remaining_ns =
+ arch_timer_remaining_ns(&vcpu->regs);
+ if (timer_remaining_ns == 0) {
+ break;
+ }
+ } else {
+ dlog_verbose("Timer disabled\n");
+ }
+ run_ret->func = FFA_MSG_WAIT_32;
+ run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
+ run_ret->arg2 = timer_remaining_ns;
+ ret = false;
+ goto out;
case VCPU_STATE_BLOCKED_INTERRUPT:
/* Allow virtual interrupts to be delivered. */
if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
break;
}
- uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;
-
if (arch_timer_enabled(&vcpu->regs)) {
timer_remaining_ns =
arch_timer_remaining_ns(&vcpu->regs);
@@ -707,17 +735,25 @@
* The vCPU is not ready to run, return the appropriate code to
* the primary which called vcpu_run.
*/
- run_ret->func = vcpu->state == VCPU_STATE_BLOCKED_MAILBOX
- ? FFA_MSG_WAIT_32
- : HF_FFA_RUN_WAIT_FOR_INTERRUPT;
+ run_ret->func = HF_FFA_RUN_WAIT_FOR_INTERRUPT;
run_ret->arg1 = ffa_vm_vcpu(vcpu->vm->id, vcpu_index(vcpu));
run_ret->arg2 = timer_remaining_ns;
ret = false;
goto out;
- case VCPU_STATE_READY:
+ case VCPU_STATE_BLOCKED:
+ /* A blocked vCPU is run unconditionally. Fall through. */
+ case VCPU_STATE_PREEMPTED:
break;
+ default:
+ /*
+ * Execution not expected to reach here. Deny the request
+ * gracefully.
+ */
+ *run_ret = ffa_error(FFA_DENIED);
+ ret = false;
+ goto out;
}
/* It has been decided that the vCPU should be run. */
@@ -743,21 +779,14 @@
}
struct ffa_value api_ffa_run(ffa_vm_id_t vm_id, ffa_vcpu_index_t vcpu_idx,
- const struct vcpu *current, struct vcpu **next)
+ struct vcpu *current, struct vcpu **next)
{
struct vm *vm;
struct vcpu *vcpu;
struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
- /* Only the primary VM can switch vCPUs. */
- if (current->vm->id != HF_PRIMARY_VM_ID) {
- ret.arg2 = FFA_DENIED;
- goto out;
- }
-
- /* Only secondary VM vCPUs can be run. */
- if (vm_id == HF_PRIMARY_VM_ID) {
- goto out;
+ if (!plat_ffa_run_checks(current, vm_id, &ret, next)) {
+ return ret;
}
if (plat_ffa_run_forward(vm_id, vcpu_idx, &ret)) {
@@ -854,7 +883,7 @@
*/
*next = api_switch_to_primary(
current, (struct ffa_value){.func = FFA_RX_RELEASE_32},
- VCPU_STATE_READY);
+ VCPU_STATE_WAITING);
return (struct ffa_value){.func = FFA_SUCCESS_32};
}
@@ -1218,7 +1247,7 @@
to.vm->mailbox.state = MAILBOX_STATE_READ;
*next = api_switch_to_primary(current, primary_ret,
- VCPU_STATE_READY);
+ VCPU_STATE_BLOCKED);
return ret;
}
@@ -1244,7 +1273,7 @@
/* Return to the primary VM directly or with a switch. */
if (from_id != HF_PRIMARY_VM_ID) {
*next = api_switch_to_primary(current, primary_ret,
- VCPU_STATE_READY);
+ VCPU_STATE_BLOCKED);
}
return ret;
@@ -1428,7 +1457,7 @@
/* Return to other world if caller is a SP. */
*next = api_switch_to_other_world(
current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
- VCPU_STATE_BLOCKED_MAILBOX);
+ VCPU_STATE_WAITING);
} else {
/* Switch back to primary VM to block. */
struct ffa_value run_return = {
@@ -1437,7 +1466,7 @@
};
*next = api_switch_to_primary(current, run_return,
- VCPU_STATE_BLOCKED_MAILBOX);
+ VCPU_STATE_WAITING);
}
out:
sl_unlock(&vm->lock);
@@ -1948,14 +1977,15 @@
case VCPU_STATE_OFF:
case VCPU_STATE_RUNNING:
case VCPU_STATE_ABORTED:
- case VCPU_STATE_READY:
case VCPU_STATE_BLOCKED_INTERRUPT:
+ case VCPU_STATE_BLOCKED:
+ case VCPU_STATE_PREEMPTED:
ret = ffa_error(FFA_BUSY);
goto out;
- case VCPU_STATE_BLOCKED_MAILBOX:
+ case VCPU_STATE_WAITING:
/*
- * Expect target vCPU to be blocked after having called
- * ffa_msg_wait or sent a direct message response.
+ * We expect the target vCPU to be in WAITING state after having
+ * either called ffa_msg_wait or sent a direct message response.
*/
break;
}
@@ -1977,7 +2007,7 @@
arch_regs_set_retval(&receiver_vcpu->regs, api_ffa_dir_msg_value(args));
- current->state = VCPU_STATE_BLOCKED_MAILBOX;
+ current->state = VCPU_STATE_BLOCKED;
/* Switch to receiver vCPU targeted to by direct msg request */
*next = receiver_vcpu;
@@ -2055,11 +2085,21 @@
vcpu_unlock(¤t_locked);
if (!vm_id_is_current_world(receiver_vm_id)) {
- *next = api_switch_to_other_world(current, to_ret,
- VCPU_STATE_BLOCKED_MAILBOX);
+ *next = api_switch_to_other_world(
+ current, to_ret,
+ /*
+ * Current vCPU sent a direct response. It moves to
+ * waiting state.
+ */
+ VCPU_STATE_WAITING);
} else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
- *next = api_switch_to_primary(current, to_ret,
- VCPU_STATE_BLOCKED_MAILBOX);
+ *next = api_switch_to_primary(
+ current, to_ret,
+ /*
+ * Current vCPU sent a direct response. It moves to
+ * waiting state.
+ */
+ VCPU_STATE_WAITING);
} else if (vm_id_is_current_world(receiver_vm_id)) {
/*
* It is expected the receiver_vm_id to be from an SP, otherwise
@@ -2067,8 +2107,11 @@
* made function return error before getting to this point.
*/
*next = api_switch_to_vm(current, to_ret,
- VCPU_STATE_BLOCKED_MAILBOX,
- receiver_vm_id);
+ /*
+ * Current vCPU sent a direct response.
+ * It moves to waiting state.
+ */
+ VCPU_STATE_WAITING, receiver_vm_id);
} else {
panic("Invalid direct message response invocation");
}
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 587cf95..5e8221e 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -328,12 +328,14 @@
current_vm_locked = vm_lock(current->vm);
if (current_vm_locked.vm->initialized == false) {
current_vm_locked.vm->initialized = true;
+ current->is_bootstrapped = true;
dlog_verbose("Initialized VM: %#x, boot_order: %u\n",
current_vm_locked.vm->id,
current_vm_locked.vm->boot_order);
if (current_vm_locked.vm->next_boot != NULL) {
- current->state = VCPU_STATE_BLOCKED_MAILBOX;
+ /* Refer to FF-A v1.1 Beta0, section 7.5, rule 2. */
+ current->state = VCPU_STATE_WAITING;
vm_next = current_vm_locked.vm->next_boot;
CHECK(vm_next->initialized == false);
*next = vm_get_vcpu(vm_next, vcpu_index(current));
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 392eb17..6da2e09 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -244,3 +244,17 @@
(void)current;
return false;
}
+
+/**
+ * Check if current VM can resume target VM/SP using FFA_RUN ABI.
+ */
+bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
+ struct ffa_value *run_ret, struct vcpu **next)
+{
+ (void)current;
+ (void)target_vm_id;
+ (void)run_ret;
+ (void)next;
+
+ return true;
+}
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 010a982..f7144ea 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -403,3 +403,25 @@
(void)current;
return has_vhe_support();
}
+
+/**
+ * Check if current VM can resume target VM/SP using FFA_RUN ABI.
+ */
+bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
+ struct ffa_value *run_ret, struct vcpu **next)
+{
+ (void)next;
+
+ /* Only the primary VM can switch vCPUs. */
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
+ run_ret->arg2 = FFA_DENIED;
+ return false;
+ }
+
+ /* Only secondary VM vCPUs can be run. */
+ if (target_vm_id == HF_PRIMARY_VM_ID) {
+ return false;
+ }
+
+ return true;
+}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index c533b04..c720fd9 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -9,6 +9,7 @@
#include "hf/arch/ffa.h"
#include "hf/arch/sve.h"
+#include "hf/api.h"
#include "hf/dlog.h"
#include "hf/ffa.h"
#include "hf/ffa_internal.h"
@@ -555,3 +556,49 @@
/* FFA_MEM_PERM_SET/GET is only valid before SPs are initialized */
return has_vhe_support() && (current->vm->initialized == false);
}
+
+/**
+ * Check if current VM can resume target VM/SP using FFA_RUN ABI.
+ */
+bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
+ struct ffa_value *run_ret, struct vcpu **next)
+{
+ (void)next;
+ /*
+ * Under the partition runtime model specified in the FF-A v1.1 Beta0
+ * spec, an SP can invoke FFA_RUN to resume a target SP.
+ */
+ struct vcpu *target_vcpu;
+ struct vcpu_locked target_locked;
+ bool ret = true;
+ struct vm *vm;
+
+ vm = vm_find(target_vm_id);
+ if (vm == NULL) {
+ return false;
+ }
+
+ target_vcpu = api_ffa_get_vm_vcpu(vm, current);
+
+ /* Lock target vCPU before accessing its state. */
+ target_locked = vcpu_lock(target_vcpu);
+
+ /* Only the primary VM can turn ON a vCPU that is currently OFF. */
+ if (current->vm->id != HF_PRIMARY_VM_ID &&
+ target_vcpu->state == VCPU_STATE_OFF) {
+ run_ret->arg2 = FFA_DENIED;
+ ret = false;
+ goto out;
+ }
+
+ /* An SP cannot invoke FFA_RUN to resume a normal-world VM. */
+ if (!vm_id_is_current_world(target_vm_id)) {
+ run_ret->arg2 = FFA_DENIED;
+ ret = false;
+ goto out;
+ }
+out:
+ vcpu_unlock(&target_locked);
+
+ return ret;
+}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index a5d8536..baf14e3 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -228,3 +228,17 @@
(void)current;
return false;
}
+
+/**
+ * Check if current VM can resume target VM/SP using FFA_RUN ABI.
+ */
+bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
+ struct ffa_value *run_ret, struct vcpu **next)
+{
+ (void)current;
+ (void)target_vm_id;
+ (void)run_ret;
+ (void)next;
+
+ return true;
+}
diff --git a/src/vcpu.c b/src/vcpu.c
index 68acd00..2c79ae5 100644
--- a/src/vcpu.c
+++ b/src/vcpu.c
@@ -66,12 +66,12 @@
/**
* Initialise the registers for the given vCPU and set the state to
- * VCPU_STATE_READY. The caller must hold the vCPU lock while calling this.
+ * VCPU_STATE_WAITING. The caller must hold the vCPU lock while calling this.
*/
void vcpu_on(struct vcpu_locked vcpu, ipaddr_t entry, uintreg_t arg)
{
arch_regs_set_pc_arg(&vcpu.vcpu->regs, entry, arg);
- vcpu.vcpu->state = VCPU_STATE_READY;
+ vcpu.vcpu->state = VCPU_STATE_WAITING;
}
ffa_vcpu_index_t vcpu_index(const struct vcpu *vcpu)
@@ -84,28 +84,14 @@
/**
* Check whether the given vcpu_state is an off state, for the purpose of
- * turning vCPUs on and off. Note that aborted still counts as on in this
- * context.
+ * turning vCPUs on and off. Note that Aborted still counts as ON for the
+ * purposes of PSCI, because according to the PSCI specification (section
+ * 5.7.1) a core is only considered to be off if it has been turned off
+ * with a CPU_OFF call or hasn't yet been turned on with a CPU_ON call.
*/
bool vcpu_is_off(struct vcpu_locked vcpu)
{
- switch (vcpu.vcpu->state) {
- case VCPU_STATE_OFF:
- return true;
- case VCPU_STATE_READY:
- case VCPU_STATE_RUNNING:
- case VCPU_STATE_BLOCKED_MAILBOX:
- case VCPU_STATE_BLOCKED_INTERRUPT:
- case VCPU_STATE_ABORTED:
- /*
- * Aborted still counts as ON for the purposes of PSCI,
- * because according to the PSCI specification (section
- * 5.7.1) a core is only considered to be off if it has
- * been turned off with a CPU_OFF call or hasn't yet
- * been turned on with a CPU_ON call.
- */
- return false;
- }
+ return (vcpu.vcpu->state == VCPU_STATE_OFF);
}
/**