refactor: pass locked vCPU structures to FF-A helper functions
While handling an FF-A ABI call, Hafnium may have to alter the state
of multiple vCPUs. Hafnium must lock a vCPU (to protect against
concurrent accesses due to execution on other physical cores) before
modifying its properties, and unlock it once done.
Currently, this is done piecemeal, which could lead to deadlocks.
This patch refactors the helper functions to receive locked vCPU(s):
vCPUs are locked as early as possible and unlocked only at the tail
end of the FF-A ABI handler.
Also, to adhere to the rule that a VM's lock must be acquired before
any of its vCPUs' locks, this patch makes a few changes to momentarily
release a vCPU lock and re-acquire it right after taking the
corresponding VM lock.
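The deadlock-free pattern this change leans on is to take both vCPU
locks through a single helper (vcpu_lock_both) instead of nesting
individual vcpu_lock() calls. Below is a minimal, illustrative sketch
of that idea, assuming the helper orders the two acquisitions by a
fixed criterion such as the vCPUs' addresses; the stand-in types and
pthread mutexes are placeholders for illustration, not Hafnium's own
definitions:

    #include <pthread.h>
    #include <stdint.h>

    struct vcpu {
            pthread_mutex_t lock;
    };

    struct vcpu_locked {
            struct vcpu *vcpu;
    };

    struct two_vcpu_locked {
            struct vcpu_locked vcpu1;
            struct vcpu_locked vcpu2;
    };

    /*
     * Illustrative only: acquire both vCPU locks in a fixed (address)
     * order so two cores locking the same pair can never deadlock.
     * Callers must pass two distinct vCPUs; the self-injection case is
     * handled separately, as in this patch.
     */
    static struct two_vcpu_locked vcpu_lock_both_sketch(struct vcpu *a,
                                                        struct vcpu *b)
    {
            struct vcpu *first = ((uintptr_t)a < (uintptr_t)b) ? a : b;
            struct vcpu *second = (first == a) ? b : a;

            pthread_mutex_lock(&first->lock);
            pthread_mutex_lock(&second->lock);

            return (struct two_vcpu_locked){
                    .vcpu1 = {.vcpu = a},
                    .vcpu2 = {.vcpu = b},
            };
    }

With such a helper in place, handlers like api_ffa_run() and the
direct-message paths can lock the current and target vCPUs up front,
perform all state updates, and unlock only at their out: labels, which
is the shape this patch gives them.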
Signed-off-by: Madhukar Pappireddy <madhukar.pappireddy@arm.com>
Change-Id: I392f053f7384d7c34f22924a57a6d8e9f62ddb2e
diff --git a/src/api.c b/src/api.c
index 7637eda..558d093 100644
--- a/src/api.c
+++ b/src/api.c
@@ -120,11 +120,12 @@
* - UP migratable.
* - MP with pinned Execution Contexts.
*/
-struct vcpu *api_switch_to_vm(struct vcpu *current, struct ffa_value to_ret,
+struct vcpu *api_switch_to_vm(struct vcpu_locked current_locked,
+ struct ffa_value to_ret,
enum vcpu_state vcpu_state, ffa_vm_id_t to_id)
{
struct vm *to_vm = vm_find(to_id);
- struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current);
+ struct vcpu *next = api_ffa_get_vm_vcpu(to_vm, current_locked.vcpu);
CHECK(next != NULL);
@@ -132,9 +133,7 @@
arch_regs_set_retval(&next->regs, to_ret);
/* Set the current vCPU state. */
- sl_lock(&current->lock);
- current->state = vcpu_state;
- sl_unlock(&current->lock);
+ current_locked.vcpu->state = vcpu_state;
return next;
}
@@ -145,7 +144,7 @@
* This triggers the scheduling logic to run. Run in the context of secondary VM
* to cause FFA_RUN to return and the primary VM to regain control of the CPU.
*/
-struct vcpu *api_switch_to_primary(struct vcpu *current,
+struct vcpu *api_switch_to_primary(struct vcpu_locked current_locked,
struct ffa_value primary_ret,
enum vcpu_state secondary_state)
{
@@ -182,7 +181,7 @@
break;
}
- return api_switch_to_vm(current, primary_ret, secondary_state,
+ return api_switch_to_vm(current_locked, primary_ret, secondary_state,
HF_PRIMARY_VM_ID);
}
@@ -195,11 +194,11 @@
* Called in context of a direct message response from a secure
* partition to a VM.
*/
-struct vcpu *api_switch_to_other_world(struct vcpu *current,
+struct vcpu *api_switch_to_other_world(struct vcpu_locked current_locked,
struct ffa_value other_world_ret,
enum vcpu_state vcpu_state)
{
- return api_switch_to_vm(current, other_world_ret, vcpu_state,
+ return api_switch_to_vm(current_locked, other_world_ret, vcpu_state,
HF_OTHER_WORLD_ID);
}
@@ -227,12 +226,18 @@
*/
struct vcpu *api_preempt(struct vcpu *current)
{
+ struct vcpu_locked current_locked;
+ struct vcpu *next;
struct ffa_value ret = {
.func = FFA_INTERRUPT_32,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
- return api_switch_to_primary(current, ret, VCPU_STATE_PREEMPTED);
+ current_locked = vcpu_lock(current);
+ next = api_switch_to_primary(current_locked, ret, VCPU_STATE_PREEMPTED);
+ vcpu_unlock(&current_locked);
+
+ return next;
}
/**
@@ -241,13 +246,19 @@
*/
struct vcpu *api_wait_for_interrupt(struct vcpu *current)
{
+ struct vcpu_locked current_locked;
+ struct vcpu *next;
struct ffa_value ret = {
.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
- return api_switch_to_primary(current, ret,
+ current_locked = vcpu_lock(current);
+ next = api_switch_to_primary(current_locked, ret,
VCPU_STATE_BLOCKED_INTERRUPT);
+ vcpu_unlock(&current_locked);
+
+ return next;
}
/**
@@ -255,18 +266,24 @@
*/
struct vcpu *api_vcpu_off(struct vcpu *current)
{
+ struct vcpu_locked current_locked;
+ struct vcpu *next;
struct ffa_value ret = {
.func = HF_FFA_RUN_WAIT_FOR_INTERRUPT,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
};
+ current_locked = vcpu_lock(current);
/*
* Disable the timer, so the scheduler doesn't get told to call back
* based on it.
*/
arch_timer_disable_current();
- return api_switch_to_primary(current, ret, VCPU_STATE_OFF);
+ next = api_switch_to_primary(current_locked, ret, VCPU_STATE_OFF);
+ vcpu_unlock(&current_locked);
+
+ return next;
}
/**
@@ -283,6 +300,9 @@
enum vcpu_state next_state = VCPU_STATE_RUNNING;
uint32_t timeout_low = 0;
uint32_t timeout_high = 0;
+ struct vcpu_locked next_locked = (struct vcpu_locked){
+ .vcpu = NULL,
+ };
if (args != NULL) {
if (args->arg4 != 0U || args->arg5 != 0U || args->arg6 != 0U ||
@@ -303,12 +323,12 @@
current_locked = vcpu_lock(current);
transition_allowed = plat_ffa_check_runtime_state_transition(
- current, current->vm->id, HF_INVALID_VM_ID, NULL, FFA_YIELD_32,
- &next_state);
- vcpu_unlock(&current_locked);
+ current_locked, current->vm->id, HF_INVALID_VM_ID, next_locked,
+ FFA_YIELD_32, &next_state);
if (!transition_allowed) {
- return ffa_error(FFA_DENIED);
+ ret = ffa_error(FFA_DENIED);
+ goto out;
}
/*
@@ -322,21 +342,39 @@
assert(!vm_id_is_current_world(current->vm->id) ||
next_state == VCPU_STATE_BLOCKED);
- return plat_ffa_yield_prepare(current, next, timeout_low, timeout_high);
+ ret = plat_ffa_yield_prepare(current_locked, next, timeout_low,
+ timeout_high);
+out:
+ vcpu_unlock(&current_locked);
+ return ret;
}
/**
* Switches to the primary so that it can switch to the target, or kick it if it
* is already running on a different physical CPU.
*/
-struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
+static struct vcpu *api_wake_up_locked(struct vcpu_locked current_locked,
+ struct vcpu *target_vcpu)
{
struct ffa_value ret = {
.func = FFA_INTERRUPT_32,
.arg1 = ffa_vm_vcpu(target_vcpu->vm->id,
vcpu_index(target_vcpu)),
};
- return api_switch_to_primary(current, ret, VCPU_STATE_BLOCKED);
+
+ return api_switch_to_primary(current_locked, ret, VCPU_STATE_BLOCKED);
+}
+
+struct vcpu *api_wake_up(struct vcpu *current, struct vcpu *target_vcpu)
+{
+ struct vcpu_locked current_locked;
+ struct vcpu *next;
+
+ current_locked = vcpu_lock(current);
+ next = api_wake_up_locked(current_locked, target_vcpu);
+ vcpu_unlock(&current_locked);
+
+ return next;
}
/**
@@ -345,6 +383,8 @@
struct vcpu *api_abort(struct vcpu *current)
{
struct ffa_value ret = ffa_error(FFA_ABORTED);
+ struct vcpu_locked current_locked;
+ struct vcpu *next;
dlog_notice("Aborting VM %#x vCPU %u\n", current->vm->id,
vcpu_index(current));
@@ -361,7 +401,11 @@
/* TODO: free resources once all vCPUs abort. */
- return api_switch_to_primary(current, ret, VCPU_STATE_ABORTED);
+ current_locked = vcpu_lock(current);
+ next = api_switch_to_primary(current_locked, ret, VCPU_STATE_ABORTED);
+ vcpu_unlock(&current_locked);
+
+ return next;
}
/*
@@ -784,10 +828,12 @@
* up or kick the target vCPU.
*/
int64_t api_interrupt_inject_locked(struct vcpu_locked target_locked,
- uint32_t intid, struct vcpu *current,
+ uint32_t intid,
+ struct vcpu_locked current_locked,
struct vcpu **next)
{
struct vcpu *target_vcpu = target_locked.vcpu;
+ struct vcpu *current = current_locked.vcpu;
struct interrupts *interrupts = &target_vcpu->interrupts;
int64_t ret = 0;
@@ -819,7 +865,7 @@
*/
ret = 1;
} else if (current != target_vcpu && next != NULL) {
- *next = api_wake_up(current, target_vcpu);
+ *next = api_wake_up_locked(current_locked, target_vcpu);
}
out:
@@ -829,21 +875,6 @@
return ret;
}
-/* Wrapper to internal_interrupt_inject with locking of target vCPU */
-static int64_t internal_interrupt_inject(struct vcpu *target_vcpu,
- uint32_t intid, struct vcpu *current,
- struct vcpu **next)
-{
- int64_t ret;
- struct vcpu_locked target_locked;
-
- target_locked = vcpu_lock(target_vcpu);
- ret = api_interrupt_inject_locked(target_locked, intid, current, next);
- vcpu_unlock(&target_locked);
-
- return ret;
-}
-
/**
* Constructs the return value from a successful FFA_MSG_POLL or
* FFA_MSG_WAIT call.
@@ -936,8 +967,12 @@
struct ffa_value api_ffa_msg_wait(struct vcpu *current, struct vcpu **next,
struct ffa_value *args)
{
+ struct vcpu_locked current_locked;
enum vcpu_state next_state = VCPU_STATE_RUNNING;
struct ffa_value ret;
+ struct vcpu_locked next_locked = (struct vcpu_locked){
+ .vcpu = NULL,
+ };
if (args->arg1 != 0U || args->arg2 != 0U || args->arg3 != 0U ||
args->arg4 != 0U || args->arg5 != 0U || args->arg6 != 0U ||
@@ -945,16 +980,20 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
+ current_locked = vcpu_lock(current);
if (!plat_ffa_check_runtime_state_transition(
- current, current->vm->id, HF_INVALID_VM_ID, NULL,
- FFA_MSG_WAIT_32, &next_state)) {
- return ffa_error(FFA_DENIED);
+ current_locked, current->vm->id, HF_INVALID_VM_ID,
+ next_locked, FFA_MSG_WAIT_32, &next_state)) {
+ ret = ffa_error(FFA_DENIED);
+ goto out;
}
assert(!vm_id_is_current_world(current->vm->id) ||
next_state == VCPU_STATE_WAITING);
- ret = plat_ffa_msg_wait_prepare(current, next);
+ ret = plat_ffa_msg_wait_prepare(current_locked, next);
+out:
+ vcpu_unlock(&current_locked);
if (ret.func != FFA_ERROR_32) {
struct vm_locked vm_locked = vm_lock(current->vm);
@@ -970,15 +1009,16 @@
* Prepares the vCPU to run by updating its state and fetching whether a return
* value needs to be forced onto the vCPU.
*/
-static bool api_vcpu_prepare_run(struct vcpu *current, struct vcpu *vcpu,
+static bool api_vcpu_prepare_run(struct vcpu_locked current_locked,
+ struct vcpu_locked vcpu_next_locked,
struct ffa_value *run_ret)
{
- struct vcpu_locked vcpu_locked;
struct vm_locked vm_locked;
bool ret;
uint64_t timer_remaining_ns = FFA_SLEEP_INDEFINITE;
bool vcpu_was_init_state = false;
bool need_vm_lock;
+ struct two_vcpu_locked vcpus_locked;
/*
* Check that the registers are available so that the vCPU can be run.
@@ -988,7 +1028,8 @@
* dependencies in the common run case meaning the sensitive context
* switch performance is consistent.
*/
- vcpu_locked = vcpu_lock(vcpu);
+ struct vcpu *vcpu = vcpu_next_locked.vcpu;
+ struct vcpu *current = current_locked.vcpu;
/* The VM needs to be locked to deliver mailbox messages. */
need_vm_lock = vcpu->state == VCPU_STATE_WAITING ||
@@ -998,9 +1039,14 @@
vcpu->state == VCPU_STATE_PREEMPTED));
if (need_vm_lock) {
- vcpu_unlock(&vcpu_locked);
+ vcpu_unlock(&vcpu_next_locked);
+ vcpu_unlock(&current_locked);
vm_locked = vm_lock(vcpu->vm);
- vcpu_locked = vcpu_lock(vcpu);
+
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(current, vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ vcpu_next_locked = vcpus_locked.vcpu2;
}
/*
@@ -1063,7 +1109,7 @@
assert(need_vm_lock == true);
if (!vm_locked.vm->el0_partition &&
plat_ffa_inject_notification_pending_interrupt(
- vcpu_locked, current, vm_locked)) {
+ vcpu_next_locked, current_locked, vm_locked)) {
/* TODO: setting a return value to override
* the placeholder (FFA_ERROR(INTERRUPTED))
* set by FFA_MSG_WAIT. FF-A v1.1 allows
@@ -1091,7 +1137,7 @@
break;
}
- if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
+ if (vcpu_interrupt_count_get(vcpu_next_locked) > 0) {
break;
}
@@ -1112,13 +1158,13 @@
case VCPU_STATE_BLOCKED_INTERRUPT:
if (need_vm_lock &&
plat_ffa_inject_notification_pending_interrupt(
- vcpu_locked, current, vm_locked)) {
- assert(vcpu_interrupt_count_get(vcpu_locked) > 0);
+ vcpu_next_locked, current_locked, vm_locked)) {
+ assert(vcpu_interrupt_count_get(vcpu_next_locked) > 0);
break;
}
/* Allow virtual interrupts to be delivered. */
- if (vcpu_interrupt_count_get(vcpu_locked) > 0) {
+ if (vcpu_interrupt_count_get(vcpu_next_locked) > 0) {
break;
}
@@ -1152,7 +1198,7 @@
/* Check NPI is to be injected here. */
if (need_vm_lock) {
plat_ffa_inject_notification_pending_interrupt(
- vcpu_locked, current, vm_locked);
+ vcpu_next_locked, current_locked, vm_locked);
}
break;
default:
@@ -1165,10 +1211,10 @@
goto out;
}
- plat_ffa_init_schedule_mode_ffa_run(current, vcpu_locked);
+ plat_ffa_init_schedule_mode_ffa_run(current_locked, vcpu_next_locked);
/* It has been decided that the vCPU should be run. */
- vcpu->cpu = current->cpu;
+ vcpu->cpu = current_locked.vcpu->cpu;
vcpu->state = VCPU_STATE_RUNNING;
if (vcpu_was_init_state) {
@@ -1186,7 +1232,6 @@
ret = true;
out:
- vcpu_unlock(&vcpu_locked);
if (need_vm_lock) {
vm_unlock(&vm_locked);
}
@@ -1201,13 +1246,16 @@
struct ffa_value ret = ffa_error(FFA_INVALID_PARAMETERS);
enum vcpu_state next_state = VCPU_STATE_RUNNING;
struct vcpu_locked current_locked;
+ struct vcpu_locked vcpu_next_locked;
+ struct two_vcpu_locked vcpus_locked;
- if (!plat_ffa_run_checks(current, vm_id, vcpu_idx, &ret, next)) {
- return ret;
+ current_locked = vcpu_lock(current);
+ if (!plat_ffa_run_checks(current_locked, vm_id, vcpu_idx, &ret, next)) {
+ goto out;
}
if (plat_ffa_run_forward(vm_id, vcpu_idx, &ret)) {
- return ret;
+ goto out;
}
/* The requested VM must exist. */
@@ -1232,14 +1280,26 @@
vcpu = vm_get_vcpu(vm, vcpu_idx);
}
- if (!plat_ffa_check_runtime_state_transition(current, current->vm->id,
- HF_INVALID_VM_ID, vcpu,
- FFA_RUN_32, &next_state)) {
- return ffa_error(FFA_DENIED);
+ /*
+ * Unlock the current vCPU to allow it to be locked together with the
+ * next vCPU.
+ */
+ vcpu_unlock(&current_locked);
+
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(current, vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ vcpu_next_locked = vcpus_locked.vcpu2;
+
+ if (!plat_ffa_check_runtime_state_transition(
+ current_locked, current->vm->id, HF_INVALID_VM_ID,
+ vcpu_next_locked, FFA_RUN_32, &next_state)) {
+ ret = ffa_error(FFA_DENIED);
+ goto out_vcpu;
}
- if (!api_vcpu_prepare_run(current, vcpu, &ret)) {
- goto out;
+ if (!api_vcpu_prepare_run(current_locked, vcpu_next_locked, &ret)) {
+ goto out_vcpu;
}
/*
@@ -1250,8 +1310,9 @@
*/
if (arch_timer_pending(&vcpu->regs)) {
/* Make virtual timer interrupt pending. */
- internal_interrupt_inject(vcpu, HF_VIRTUAL_TIMER_INTID, vcpu,
- NULL);
+ api_interrupt_inject_locked(vcpu_next_locked,
+ HF_VIRTUAL_TIMER_INTID,
+ vcpu_next_locked, NULL);
/*
* Set the mask bit so the hardware interrupt doesn't fire
@@ -1268,9 +1329,7 @@
assert(!vm_id_is_current_world(current->vm->id) ||
next_state == VCPU_STATE_BLOCKED);
- current_locked = vcpu_lock(current);
current->state = VCPU_STATE_BLOCKED;
- vcpu_unlock(&current_locked);
/*
* Set a placeholder return code to the scheduler. This will be
@@ -1280,7 +1339,11 @@
ret.arg1 = 0;
ret.arg2 = 0;
+out_vcpu:
+ vcpu_unlock(&vcpu_next_locked);
+
out:
+ vcpu_unlock(&current_locked);
return ret;
}
@@ -1871,21 +1934,17 @@
* Checks whether the vCPU's attempt to block for a message has already been
* interrupted or whether it is allowed to block.
*/
-static bool api_ffa_msg_recv_block_interrupted(struct vcpu *current)
+static bool api_ffa_msg_recv_block_interrupted(
+ struct vcpu_locked current_locked)
{
- struct vcpu_locked current_locked;
bool interrupted;
- current_locked = vcpu_lock(current);
-
/*
* Don't block if there are enabled and pending interrupts, to match
* behaviour of wait_for_interrupt.
*/
interrupted = (vcpu_interrupt_count_get(current_locked) > 0);
- vcpu_unlock(&current_locked);
-
return interrupted;
}
@@ -1895,11 +1954,11 @@
*
* No new messages can be received until the mailbox has been cleared.
*/
-struct ffa_value api_ffa_msg_recv(bool block, struct vcpu *current,
+struct ffa_value api_ffa_msg_recv(bool block, struct vcpu_locked current_locked,
struct vcpu **next)
{
bool is_direct_request_ongoing;
- struct vcpu_locked current_locked;
+ struct vcpu *current = current_locked.vcpu;
struct vm *vm = current->vm;
struct ffa_value return_code;
bool is_from_secure_world =
@@ -1917,17 +1976,22 @@
* Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
* invocation.
*/
- current_locked = vcpu_lock(current);
is_direct_request_ongoing =
is_ffa_direct_msg_request_ongoing(current_locked);
+
+ /*
+ * A VM's lock must be acquired before any of its vCPUs' locks. Hence,
+ * unlock the current vCPU and re-acquire it right after the VM's lock.
+ */
vcpu_unlock(&current_locked);
+ sl_lock(&vm->lock);
+ current_locked = vcpu_lock(current);
if (is_direct_request_ongoing) {
- return ffa_error(FFA_DENIED);
+ return_code = ffa_error(FFA_DENIED);
+ goto out;
}
- sl_lock(&vm->lock);
-
/* Return pending messages without blocking. */
if (vm->mailbox.state == MAILBOX_STATE_FULL) {
return_code = ffa_msg_recv_return(vm);
@@ -1949,14 +2013,15 @@
* that time to FFA_SUCCESS.
*/
return_code = ffa_error(FFA_INTERRUPTED);
- if (api_ffa_msg_recv_block_interrupted(current)) {
+ if (api_ffa_msg_recv_block_interrupted(current_locked)) {
goto out;
}
if (is_from_secure_world) {
/* Return to other world if caller is a SP. */
*next = api_switch_to_other_world(
- current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
+ current_locked,
+ (struct ffa_value){.func = FFA_MSG_WAIT_32},
VCPU_STATE_WAITING);
} else {
/* Switch back to primary VM to block. */
@@ -1965,7 +2030,7 @@
.arg1 = ffa_vm_vcpu(vm->id, vcpu_index(current)),
};
- *next = api_switch_to_primary(current, run_return,
+ *next = api_switch_to_primary(current_locked, run_return,
VCPU_STATE_WAITING);
}
out:
@@ -2304,6 +2369,10 @@
{
struct vcpu *target_vcpu;
struct vm *target_vm = vm_find(target_vm_id);
+ struct vcpu_locked current_locked;
+ struct vcpu_locked target_locked;
+ struct two_vcpu_locked vcpus_locked;
+ int64_t ret;
if (intid >= HF_NUM_INTIDS) {
return -1;
@@ -2324,12 +2393,30 @@
target_vcpu = vm_get_vcpu(target_vm, target_vcpu_idx);
+ /* A VM could inject an interrupt for itself. */
+ if (target_vcpu != current) {
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(current, target_vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ target_locked = vcpus_locked.vcpu2;
+ } else {
+ current_locked = vcpu_lock(current);
+ target_locked = current_locked;
+ }
+
dlog_verbose(
"Injecting interrupt %u for VM %#x vCPU %u from VM %#x vCPU "
"%u\n",
intid, target_vm_id, target_vcpu_idx, current->vm->id,
vcpu_index(current));
- return internal_interrupt_inject(target_vcpu, intid, current, next);
+ ret = api_interrupt_inject_locked(target_locked, intid, current_locked,
+ next);
+ if (target_vcpu != current) {
+ vcpu_unlock(&target_locked);
+ }
+
+ vcpu_unlock(&current_locked);
+ return ret;
}
/** Returns the version of the implemented FF-A specification. */
@@ -2535,6 +2622,8 @@
struct vm *receiver_vm;
struct vm_locked receiver_locked;
struct vcpu *receiver_vcpu;
+ struct vcpu_locked current_locked;
+ struct vcpu_locked receiver_vcpu_locked;
struct two_vcpu_locked vcpus_locked;
enum vcpu_state next_state = VCPU_STATE_RUNNING;
@@ -2580,14 +2669,23 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (!plat_ffa_check_runtime_state_transition(
- current, sender_vm_id, HF_INVALID_VM_ID, receiver_vcpu,
- args.func, &next_state)) {
- return ffa_error(FFA_DENIED);
- }
-
+ /*
+ * If VM must be locked, it must be done before any of its vCPUs are
+ * locked.
+ */
receiver_locked = vm_lock(receiver_vm);
- vcpus_locked = vcpu_lock_both(receiver_vcpu, current);
+
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(current, receiver_vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ receiver_vcpu_locked = vcpus_locked.vcpu2;
+
+ if (!plat_ffa_check_runtime_state_transition(
+ current_locked, sender_vm_id, HF_INVALID_VM_ID,
+ receiver_vcpu_locked, args.func, &next_state)) {
+ ret = ffa_error(FFA_DENIED);
+ goto out;
+ }
/*
* If destination vCPU is executing or already received an
@@ -2596,7 +2694,7 @@
* changed but regs_available is still false thus consider this case as
* the vCPU not yet ready to receive a direct message request.
*/
- if (is_ffa_direct_msg_request_ongoing(vcpus_locked.vcpu1) ||
+ if (is_ffa_direct_msg_request_ongoing(receiver_vcpu_locked) ||
receiver_vcpu->state == VCPU_STATE_RUNNING ||
!receiver_vcpu->regs_available) {
dlog_verbose("Receiver is busy with another request.\n");
@@ -2638,9 +2736,9 @@
/* Inject timer interrupt if any pending */
if (arch_timer_pending(&receiver_vcpu->regs)) {
- api_interrupt_inject_locked(vcpus_locked.vcpu1,
- HF_VIRTUAL_TIMER_INTID, current,
- NULL);
+ api_interrupt_inject_locked(receiver_vcpu_locked,
+ HF_VIRTUAL_TIMER_INTID,
+ current_locked, NULL);
arch_timer_mask(&receiver_vcpu->regs);
}
@@ -2657,8 +2755,8 @@
next_state == VCPU_STATE_BLOCKED);
current->state = VCPU_STATE_BLOCKED;
- plat_ffa_wind_call_chain_ffa_direct_req(vcpus_locked.vcpu2,
- vcpus_locked.vcpu1);
+ plat_ffa_wind_call_chain_ffa_direct_req(current_locked,
+ receiver_vcpu_locked);
/* Switch to receiver vCPU targeted to by direct msg request */
*next = receiver_vcpu;
@@ -2671,10 +2769,7 @@
* to receiver_vcpu.
*/
plat_ffa_inject_notification_pending_interrupt(
- vcpus_locked.vcpu1.vcpu == receiver_vcpu
- ? vcpus_locked.vcpu1
- : vcpus_locked.vcpu2,
- current, receiver_locked);
+ receiver_vcpu_locked, current_locked, receiver_locked);
}
/*
@@ -2683,9 +2778,9 @@
*/
out:
- sl_unlock(&receiver_vcpu->lock);
- sl_unlock(&current->lock);
+ vcpu_unlock(&receiver_vcpu_locked);
vm_unlock(&receiver_locked);
+ vcpu_unlock(&current_locked);
return ret;
}
@@ -2694,34 +2789,30 @@
* Resume the target vCPU after the current vCPU sent a direct response.
* Current vCPU moves to waiting state.
*/
-void api_ffa_resume_direct_resp_target(struct vcpu *current, struct vcpu **next,
+void api_ffa_resume_direct_resp_target(struct vcpu_locked current_locked,
+ struct vcpu **next,
ffa_vm_id_t receiver_vm_id,
struct ffa_value to_ret,
bool is_nwd_call_chain)
{
if (!vm_id_is_current_world(receiver_vm_id)) {
- *next = api_switch_to_other_world(current, to_ret,
+ *next = api_switch_to_other_world(current_locked, to_ret,
VCPU_STATE_WAITING);
/* End of NWd scheduled call chain. */
assert(!is_nwd_call_chain ||
- (current->call_chain.prev_node == NULL));
+ (current_locked.vcpu->call_chain.prev_node == NULL));
} else if (receiver_vm_id == HF_PRIMARY_VM_ID) {
- *next = api_switch_to_primary(current, to_ret,
+ *next = api_switch_to_primary(current_locked, to_ret,
VCPU_STATE_WAITING);
-
- /* Removing a node from NWd scheduled call chain. */
- if (is_nwd_call_chain) {
- vcpu_call_chain_remove_node(current, *next);
- }
} else if (vm_id_is_current_world(receiver_vm_id)) {
/*
* It is expected the receiver_vm_id to be from an SP, otherwise
* 'plat_ffa_is_direct_response_valid' should have
* made function return error before getting to this point.
*/
- *next = api_switch_to_vm(current, to_ret, VCPU_STATE_WAITING,
- receiver_vm_id);
+ *next = api_switch_to_vm(current_locked, to_ret,
+ VCPU_STATE_WAITING, receiver_vm_id);
} else {
panic("Invalid direct message response invocation");
}
@@ -2737,35 +2828,42 @@
struct vcpu **next)
{
struct vcpu_locked current_locked;
+ struct vcpu_locked next_locked = (struct vcpu_locked){
+ .vcpu = NULL,
+ };
enum vcpu_state next_state = VCPU_STATE_RUNNING;
+ struct ffa_value ret = (struct ffa_value){.func = FFA_INTERRUPT_32};
struct ffa_value signal_interrupt =
(struct ffa_value){.func = FFA_INTERRUPT_32};
+ struct ffa_value to_ret = api_ffa_dir_msg_value(args);
+ struct two_vcpu_locked vcpus_locked;
if (!api_ffa_dir_msg_is_arg2_zero(args)) {
return ffa_error(FFA_INVALID_PARAMETERS);
}
- struct ffa_value to_ret = api_ffa_dir_msg_value(args);
-
if (!plat_ffa_is_direct_response_valid(current, sender_vm_id,
receiver_vm_id)) {
dlog_verbose("Invalid direct response call.\n");
return ffa_error(FFA_INVALID_PARAMETERS);
}
- if (!plat_ffa_check_runtime_state_transition(current, sender_vm_id,
- receiver_vm_id, NULL,
- args.func, &next_state)) {
- return ffa_error(FFA_DENIED);
+ current_locked = vcpu_lock(current);
+
+ if (!plat_ffa_check_runtime_state_transition(
+ current_locked, sender_vm_id, receiver_vm_id, next_locked,
+ args.func, &next_state)) {
+ ret = ffa_error(FFA_DENIED);
+ goto out;
}
- if (plat_ffa_is_direct_response_interrupted(current)) {
- return ffa_error(FFA_INTERRUPTED);
+ if (plat_ffa_is_direct_response_interrupted(current_locked)) {
+ ret = ffa_error(FFA_INTERRUPTED);
+ goto out;
}
assert(!vm_id_is_current_world(current->vm->id) ||
next_state == VCPU_STATE_WAITING);
- current_locked = vcpu_lock(current);
/*
* Ensure the terminating FFA_MSG_SEND_DIRECT_REQ had a
@@ -2776,8 +2874,8 @@
* Sending direct response but direct request origin
* vCPU is not set.
*/
- vcpu_unlock(&current_locked);
- return ffa_error(FFA_DENIED);
+ ret = ffa_error(FFA_DENIED);
+ goto out;
}
if (api_ffa_is_managed_exit_ongoing(current_locked)) {
@@ -2811,21 +2909,33 @@
if (plat_ffa_intercept_direct_response(current_locked, next, to_ret,
&signal_interrupt)) {
- vcpu_unlock(&current_locked);
- return signal_interrupt;
+ ret = signal_interrupt;
+ goto out;
}
/* Clear direct request origin for the caller. */
current->direct_request_origin_vm_id = HF_INVALID_VM_ID;
+ api_ffa_resume_direct_resp_target(current_locked, next, receiver_vm_id,
+ to_ret, false);
+
+ /*
+ * Unlock the current vCPU to allow it to be locked together with the
+ * next vCPU.
+ */
vcpu_unlock(&current_locked);
- api_ffa_resume_direct_resp_target(current, next, receiver_vm_id, to_ret,
- false);
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(current, *next);
+ current_locked = vcpus_locked.vcpu1;
+ next_locked = vcpus_locked.vcpu2;
- plat_ffa_unwind_call_chain_ffa_direct_resp(current, *next);
+ plat_ffa_unwind_call_chain_ffa_direct_resp(current_locked, next_locked);
+ vcpu_unlock(&next_locked);
- return (struct ffa_value){.func = FFA_INTERRUPT_32};
+out:
+ vcpu_unlock(&current_locked);
+ return ret;
}
static bool api_memory_region_check_flags(
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 1ee1f3d..0204429 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -590,9 +590,14 @@
case FFA_MSG_WAIT_32:
*args = api_ffa_msg_wait(current, next, args);
return true;
- case FFA_MSG_POLL_32:
- *args = api_ffa_msg_recv(false, current, next);
+ case FFA_MSG_POLL_32: {
+ struct vcpu_locked current_locked;
+
+ current_locked = vcpu_lock(current);
+ *args = api_ffa_msg_recv(false, current_locked, next);
+ vcpu_unlock(&current_locked);
return true;
+ }
case FFA_RUN_32:
*args = api_ffa_run(ffa_vm_id(*args), ffa_vcpu_index(*args),
current, next);
@@ -1120,7 +1125,7 @@
current_vcpu->priority_mask = pmr;
ret = api_interrupt_inject_locked(current_locked,
HF_MANAGED_EXIT_INTID,
- current_vcpu, NULL);
+ current_locked, NULL);
if (ret != 0) {
panic("Failed to inject managed exit interrupt\n");
}
diff --git a/src/arch/aarch64/plat/ffa/absent.c b/src/arch/aarch64/plat/ffa/absent.c
index 212e33c..2c58c97 100644
--- a/src/arch/aarch64/plat/ffa/absent.c
+++ b/src/arch/aarch64/plat/ffa/absent.c
@@ -358,11 +358,11 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
- struct vcpu **next)
+bool plat_ffa_run_checks(struct vcpu_locked current_locked,
+ ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
- (void)current;
+ (void)current_locked;
(void)target_vm_id;
(void)run_ret;
(void)next;
@@ -404,11 +404,11 @@
}
bool plat_ffa_inject_notification_pending_interrupt(
- struct vcpu_locked target_locked, struct vcpu *current,
+ struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
(void)target_locked;
- (void)current;
+ (void)current_locked;
(void)receiver_locked;
return false;
@@ -455,36 +455,36 @@
(void)ppool;
}
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
struct vcpu **next)
{
- (void)current;
+ (void)current_locked;
(void)next;
return (struct ffa_value){.func = FFA_INTERRUPT_32};
}
-bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
+bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
ffa_vm_id_t vm_id,
ffa_vm_id_t receiver_vm_id,
- struct vcpu *receiver_vcpu,
+ struct vcpu_locked receiver_locked,
uint32_t func, // NOLINTNEXTLINE
enum vcpu_state *next_state)
{
- (void)current;
+ (void)current_locked;
(void)vm_id;
(void)receiver_vm_id;
- (void)receiver_vcpu;
+ (void)receiver_locked;
(void)func;
(void)next_state;
return true;
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
struct vcpu_locked target_locked)
{
- (void)current;
+ (void)current_locked;
(void)target_locked;
}
@@ -496,11 +496,11 @@
(void)receiver_vcpu_locked;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
- struct vcpu *next)
+void plat_ffa_unwind_call_chain_ffa_direct_resp(
+ struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
- (void)current;
- (void)next;
+ (void)current_locked;
+ (void)next_locked;
}
bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
@@ -527,9 +527,9 @@
(void)vm_locked;
}
-bool plat_ffa_is_direct_response_interrupted(struct vcpu *current)
+bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked)
{
- (void)current;
+ (void)current_locked;
return false;
}
@@ -596,12 +596,12 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_yield_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
struct vcpu **next,
uint32_t timeout_low,
uint32_t timeout_high)
{
- (void)current;
+ (void)current_locked;
(void)next;
(void)timeout_low;
(void)timeout_high;
diff --git a/src/arch/aarch64/plat/ffa/hypervisor.c b/src/arch/aarch64/plat/ffa/hypervisor.c
index 46a2c29..46d6f19 100644
--- a/src/arch/aarch64/plat/ffa/hypervisor.c
+++ b/src/arch/aarch64/plat/ffa/hypervisor.c
@@ -790,15 +790,15 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
- struct vcpu **next)
+bool plat_ffa_run_checks(struct vcpu_locked current_locked,
+ ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
(void)next;
(void)vcpu_idx;
/* Only the primary VM can switch vCPUs. */
- if (current->vm->id != HF_PRIMARY_VM_ID) {
+ if (current_locked.vcpu->vm->id != HF_PRIMARY_VM_ID) {
run_ret->arg2 = FFA_DENIED;
return false;
}
@@ -850,11 +850,11 @@
}
bool plat_ffa_inject_notification_pending_interrupt(
- struct vcpu_locked target_locked, struct vcpu *current,
+ struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
(void)target_locked;
- (void)current;
+ (void)current_locked;
(void)receiver_locked;
return false;
@@ -1102,20 +1102,23 @@
* to be compliant with version v1.0 of the FF-A specification. It serves as
* a blocking call.
*/
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
struct vcpu **next)
{
- return api_ffa_msg_recv(true, current, next);
+ return api_ffa_msg_recv(true, current_locked, next);
}
-bool plat_ffa_check_runtime_state_transition(
- struct vcpu *current, ffa_vm_id_t vm_id, ffa_vm_id_t receiver_vm_id,
- struct vcpu *receiver_vcpu, uint32_t func, enum vcpu_state *next_state)
+bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
+ ffa_vm_id_t vm_id,
+ ffa_vm_id_t receiver_vm_id,
+ struct vcpu_locked receiver_locked,
+ uint32_t func,
+ enum vcpu_state *next_state)
{
- (void)current;
+ (void)current_locked;
(void)vm_id;
(void)receiver_vm_id;
- (void)receiver_vcpu;
+ (void)receiver_locked;
switch (func) {
case FFA_YIELD_32:
@@ -1136,11 +1139,11 @@
}
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
struct vcpu_locked target_locked)
{
/* Scheduling mode not supported in the Hypervisor/VMs. */
- (void)current;
+ (void)current_locked;
(void)target_locked;
}
@@ -1153,12 +1156,12 @@
(void)receiver_vcpu_locked;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
- struct vcpu *next)
+void plat_ffa_unwind_call_chain_ffa_direct_resp(
+ struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
/* Calls chains not supported in the Hypervisor/VMs. */
- (void)current;
- (void)next;
+ (void)current_locked;
+ (void)next_locked;
}
bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
@@ -1196,9 +1199,9 @@
}
}
-bool plat_ffa_is_direct_response_interrupted(struct vcpu *current)
+bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked)
{
- (void)current;
+ (void)current_locked;
return false;
}
@@ -1429,7 +1432,8 @@
* with the help of the primary VM.
*/
static struct ffa_value deliver_msg(struct vm_locked to, ffa_vm_id_t from_id,
- struct vcpu *current, struct vcpu **next)
+ struct vcpu_locked current_locked,
+ struct vcpu **next)
{
struct ffa_value ret = (struct ffa_value){.func = FFA_SUCCESS_32};
struct ffa_value primary_ret = {
@@ -1446,7 +1450,7 @@
*/
primary_ret = ffa_msg_recv_return(to.vm);
- *next = api_switch_to_primary(current, primary_ret,
+ *next = api_switch_to_primary(current_locked, primary_ret,
VCPU_STATE_BLOCKED);
return ret;
}
@@ -1472,7 +1476,7 @@
/* Return to the primary VM directly or with a switch. */
if (from_id != HF_PRIMARY_VM_ID) {
- *next = api_switch_to_primary(current, primary_ret,
+ *next = api_switch_to_primary(current_locked, primary_ret,
VCPU_STATE_BLOCKED);
}
@@ -2210,18 +2214,6 @@
if (size > FFA_MSG_PAYLOAD_MAX) {
return ffa_error(FFA_INVALID_PARAMETERS);
}
- /*
- * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
- * invocation.
- */
- current_locked = vcpu_lock(current);
- is_direct_request_ongoing =
- is_ffa_direct_msg_request_ongoing(current_locked);
- vcpu_unlock(&current_locked);
-
- if (is_direct_request_ongoing) {
- return ffa_error(FFA_DENIED);
- }
/* Ensure the receiver VM exists. */
to = vm_find(receiver_vm_id);
@@ -2230,17 +2222,35 @@
}
/*
+ * Deny if vCPU is executing in context of an FFA_MSG_SEND_DIRECT_REQ
+ * invocation.
+ */
+ current_locked = vcpu_lock(current);
+ is_direct_request_ongoing =
+ is_ffa_direct_msg_request_ongoing(current_locked);
+
+ if (is_direct_request_ongoing) {
+ ret = ffa_error(FFA_DENIED);
+ goto out_current;
+ }
+
+ /*
* Check that the sender has configured its send buffer. If the tx
* mailbox at from_msg is configured (i.e. from_msg != NULL) then it can
* be safely accessed after releasing the lock since the tx mailbox
* address can only be configured once.
+ * A VM's lock must be acquired before any of its vCPUs' locks. Hence,
+ * unlock the current vCPU and re-acquire it right after the VM's lock.
*/
+ vcpu_unlock(&current_locked);
sl_lock(&from->lock);
+ current_locked = vcpu_lock(current);
from_msg = from->mailbox.send;
sl_unlock(&from->lock);
if (from_msg == NULL) {
- return ffa_error(FFA_INVALID_PARAMETERS);
+ ret = ffa_error(FFA_INVALID_PARAMETERS);
+ goto out_current;
}
to_locked = vm_lock(to);
@@ -2256,11 +2266,14 @@
to->mailbox.recv_sender = sender_vm_id;
to->mailbox.recv_func = FFA_MSG_SEND_32;
to->mailbox.state = MAILBOX_STATE_FULL;
- ret = deliver_msg(to_locked, sender_vm_id, current, next);
+ ret = deliver_msg(to_locked, sender_vm_id, current_locked, next);
out:
vm_unlock(&to_locked);
+out_current:
+ vcpu_unlock(&current_locked);
+
return ret;
}
@@ -2268,11 +2281,12 @@
* Prepare to yield execution back to the VM that allocated cpu cycles and move
* to BLOCKED state.
*/
-struct ffa_value plat_ffa_yield_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
struct vcpu **next,
uint32_t timeout_low,
uint32_t timeout_high)
{
+ struct vcpu *current = current_locked.vcpu;
struct ffa_value ret = {
.func = FFA_YIELD_32,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
@@ -2283,7 +2297,7 @@
/*
* Return execution to primary VM.
*/
- *next = api_switch_to_primary(current, ret, VCPU_STATE_BLOCKED);
+ *next = api_switch_to_primary(current_locked, ret, VCPU_STATE_BLOCKED);
return (struct ffa_value){.func = FFA_SUCCESS_32};
}
diff --git a/src/arch/aarch64/plat/ffa/spmc.c b/src/arch/aarch64/plat/ffa/spmc.c
index bd497bd..85a1438 100644
--- a/src/arch/aarch64/plat/ffa/spmc.c
+++ b/src/arch/aarch64/plat/ffa/spmc.c
@@ -282,10 +282,12 @@
return result;
}
-static bool is_predecessor_in_call_chain(struct vcpu *current,
- struct vcpu *target)
+static bool is_predecessor_in_call_chain(struct vcpu_locked current_locked,
+ struct vcpu_locked target_locked)
{
struct vcpu *prev_node;
+ struct vcpu *current = current_locked.vcpu;
+ struct vcpu *target = target_locked.vcpu;
assert(current != NULL);
assert(target != NULL);
@@ -309,7 +311,8 @@
* Validates the Runtime model for FFA_RUN. Refer to section 7.2 of the FF-A
* v1.1 EAC0 spec.
*/
-static bool plat_ffa_check_rtm_ffa_run(struct vcpu *current, struct vcpu *vcpu,
+static bool plat_ffa_check_rtm_ffa_run(struct vcpu_locked current_locked,
+ struct vcpu_locked locked_vcpu,
uint32_t func,
enum vcpu_state *next_state)
{
@@ -319,7 +322,7 @@
/* Fall through. */
case FFA_RUN_32: {
/* Rules 1,2 section 7.2 EAC0 spec. */
- if (is_predecessor_in_call_chain(current, vcpu)) {
+ if (is_predecessor_in_call_chain(current_locked, locked_vcpu)) {
return false;
}
*next_state = VCPU_STATE_BLOCKED;
@@ -346,8 +349,8 @@
* Validates the Runtime model for FFA_MSG_SEND_DIRECT_REQ. Refer to section 7.3
* of the FF-A v1.1 EAC0 spec.
*/
-static bool plat_ffa_check_rtm_ffa_dir_req(struct vcpu *current,
- struct vcpu *vcpu,
+static bool plat_ffa_check_rtm_ffa_dir_req(struct vcpu_locked current_locked,
+ struct vcpu_locked locked_vcpu,
ffa_vm_id_t receiver_vm_id,
uint32_t func,
enum vcpu_state *next_state)
@@ -358,7 +361,7 @@
/* Fall through. */
case FFA_RUN_32: {
/* Rules 1,2. */
- if (is_predecessor_in_call_chain(current, vcpu)) {
+ if (is_predecessor_in_call_chain(current_locked, locked_vcpu)) {
return false;
}
@@ -368,7 +371,8 @@
case FFA_MSG_SEND_DIRECT_RESP_64:
case FFA_MSG_SEND_DIRECT_RESP_32: {
/* Rule 3. */
- if (current->direct_request_origin_vm_id == receiver_vm_id) {
+ if (current_locked.vcpu->direct_request_origin_vm_id ==
+ receiver_vm_id) {
*next_state = VCPU_STATE_WAITING;
return true;
}
@@ -391,10 +395,14 @@
* Validates the Runtime model for Secure interrupt handling. Refer to section
* 7.4 of the FF-A v1.1 EAC0 spec.
*/
-static bool plat_ffa_check_rtm_sec_interrupt(struct vcpu *current,
- struct vcpu *vcpu, uint32_t func,
+static bool plat_ffa_check_rtm_sec_interrupt(struct vcpu_locked current_locked,
+ struct vcpu_locked locked_vcpu,
+ uint32_t func,
enum vcpu_state *next_state)
{
+ struct vcpu *current = current_locked.vcpu;
+ struct vcpu *vcpu = locked_vcpu.vcpu;
+
CHECK(current->scheduling_mode == SPMC_MODE);
switch (func) {
@@ -433,12 +441,15 @@
* Validates the Runtime model for SP initialization. Refer to section 7.5 of
* the FF-A v1.1 EAC0 spec.
*/
-static bool plat_ffa_check_rtm_sp_init(struct vcpu *vcpu, uint32_t func,
+static bool plat_ffa_check_rtm_sp_init(struct vcpu_locked locked_vcpu,
+ uint32_t func,
enum vcpu_state *next_state)
{
switch (func) {
case FFA_MSG_SEND_DIRECT_REQ_64:
case FFA_MSG_SEND_DIRECT_REQ_32: {
+ struct vcpu *vcpu = locked_vcpu.vcpu;
+
assert(vcpu != NULL);
/* Rule 1. */
if (vcpu->rt_model != RTM_SP_INIT) {
@@ -473,13 +484,15 @@
* the current vcpu would transition upon the FF-A ABI invocation as determined
* by the Partition runtime model.
*/
-bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
+bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
ffa_vm_id_t vm_id,
ffa_vm_id_t receiver_vm_id,
- struct vcpu *vcpu, uint32_t func,
+ struct vcpu_locked locked_vcpu,
+ uint32_t func,
enum vcpu_state *next_state)
{
bool allowed = false;
+ struct vcpu *current = current_locked.vcpu;
assert(current != NULL);
@@ -490,19 +503,21 @@
switch (current->rt_model) {
case RTM_FFA_RUN:
- allowed = plat_ffa_check_rtm_ffa_run(current, vcpu, func,
- next_state);
+ allowed = plat_ffa_check_rtm_ffa_run(
+ current_locked, locked_vcpu, func, next_state);
break;
case RTM_FFA_DIR_REQ:
allowed = plat_ffa_check_rtm_ffa_dir_req(
- current, vcpu, receiver_vm_id, func, next_state);
+ current_locked, locked_vcpu, receiver_vm_id, func,
+ next_state);
break;
case RTM_SEC_INTERRUPT:
- allowed = plat_ffa_check_rtm_sec_interrupt(current, vcpu, func,
- next_state);
+ allowed = plat_ffa_check_rtm_sec_interrupt(
+ current_locked, locked_vcpu, func, next_state);
break;
case RTM_SP_INIT:
- allowed = plat_ffa_check_rtm_sp_init(vcpu, func, next_state);
+ allowed = plat_ffa_check_rtm_sp_init(locked_vcpu, func,
+ next_state);
break;
default:
dlog_error("Illegal Runtime Model specified by SP%x on CPU%x\n",
@@ -1070,19 +1085,21 @@
/**
* Check if current VM can resume target VM using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
- struct vcpu **next)
+bool plat_ffa_run_checks(struct vcpu_locked current_locked,
+ ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
/*
* Under the Partition runtime model specified in FF-A v1.1-Beta0 spec,
* SP can invoke FFA_RUN to resume target SP.
*/
struct vcpu *target_vcpu;
+ struct vcpu *current = current_locked.vcpu;
bool ret = true;
struct vm *vm;
- struct two_vcpu_locked vcpus_locked;
+ struct vcpu_locked target_locked;
uint8_t priority_mask;
+ struct two_vcpu_locked vcpus_locked;
vm = vm_find(target_vm_id);
if (vm == NULL) {
@@ -1097,8 +1114,12 @@
target_vcpu = api_ffa_get_vm_vcpu(vm, current);
- /* Lock both vCPUs at once. */
+ vcpu_unlock(&current_locked);
+
+ /* Lock both vCPUs at once to avoid deadlock. */
vcpus_locked = vcpu_lock_both(current, target_vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ target_locked = vcpus_locked.vcpu2;
/* Only the primary VM can turn ON a vCPU that is currently OFF. */
if (current->vm->id != HF_PRIMARY_VM_ID &&
@@ -1130,7 +1151,7 @@
goto out;
}
- vcpu_secondary_reset_and_start(vcpus_locked.vcpu2, vm->secondary_ep, 0);
+ vcpu_secondary_reset_and_start(target_locked, vm->secondary_ep, 0);
if (vm_id_is_current_world(current->vm->id)) {
/*
@@ -1179,8 +1200,7 @@
* Clear fields corresponding to secure interrupt
* handling.
*/
- plat_ffa_reset_secure_interrupt_flags(
- vcpus_locked.vcpu1);
+ plat_ffa_reset_secure_interrupt_flags(current_locked);
}
}
@@ -1210,8 +1230,7 @@
* vCPU at that moment.
*/
assert(target_vcpu->state == VCPU_STATE_PREEMPTED);
- assert(vcpu_interrupt_count_get(vcpus_locked.vcpu2) >
- 0);
+ assert(vcpu_interrupt_count_get(target_locked) > 0);
assert(target_vcpu->secure_interrupt_deactivated);
/*
@@ -1236,8 +1255,7 @@
}
out:
- sl_unlock(&target_vcpu->lock);
- sl_unlock(&current->lock);
+ vcpu_unlock(&target_locked);
return ret;
}
@@ -1351,28 +1369,35 @@
* simplifies the interrupt management implementation in SPMC.
*/
static struct vcpu_locked plat_ffa_secure_interrupt_prepare(
- struct vcpu *current, uint32_t *int_id)
+ struct vcpu_locked current_locked, uint32_t *int_id)
{
- struct vcpu_locked current_vcpu_locked;
struct vcpu_locked target_vcpu_locked;
struct vcpu *target_vcpu;
+ struct vcpu *current = current_locked.vcpu;
uint32_t id;
uint8_t priority_mask;
+ struct two_vcpu_locked vcpus_locked;
/* Find pending interrupt id. This also activates the interrupt. */
id = plat_interrupts_get_pending_interrupt_id();
target_vcpu = plat_ffa_find_target_vcpu(current, id);
- /* Update the state of current vCPU if it belongs to an SP. */
- current_vcpu_locked = vcpu_lock(current);
+ if (target_vcpu == current) {
+ current_locked = vcpu_lock(current);
+ target_vcpu_locked = current_locked;
+ } else {
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(current, target_vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ target_vcpu_locked = vcpus_locked.vcpu2;
+ }
+ /* Update the state of current vCPU if it belongs to an SP. */
if (vm_id_is_current_world(current->vm->id)) {
current->state = VCPU_STATE_PREEMPTED;
}
- vcpu_unlock(&current_vcpu_locked);
-
/*
* TODO: Design limitation. Current implementation does not support
* handling a secure interrupt while currently handling a secure
@@ -1383,8 +1408,6 @@
priority_mask = plat_interrupts_get_priority_mask();
plat_interrupts_set_priority_mask(0x0);
- target_vcpu_locked = vcpu_lock(target_vcpu);
-
/* Save current value of priority mask. */
target_vcpu->priority_mask = priority_mask;
CHECK(!target_vcpu->processing_secure_interrupt);
@@ -1399,8 +1422,8 @@
/* Inject this interrupt as a vIRQ to the target SP context. */
/* TODO: check api_interrupt_inject_locked return value. */
- (void)api_interrupt_inject_locked(target_vcpu_locked, id, current,
- NULL);
+ (void)api_interrupt_inject_locked(target_vcpu_locked, id,
+ current_locked, NULL);
*int_id = id;
return target_vcpu_locked;
}
@@ -1472,8 +1495,9 @@
* Helper for secure interrupt signaling for a S-EL1 SP.
*/
static void plat_ffa_signal_secure_interrupt_sel1(
- struct vcpu *current, struct vcpu_locked target_vcpu_locked,
- uint32_t id, struct vcpu **next, bool from_normal_world)
+ struct vcpu_locked current_locked,
+ struct vcpu_locked target_vcpu_locked, uint32_t id, struct vcpu **next,
+ bool from_normal_world)
{
struct vcpu *target_vcpu = target_vcpu_locked.vcpu;
struct ffa_value args = {
@@ -1537,12 +1561,13 @@
* resumes the target vCPU for handling secure
* interrupt.
*/
- assert(current->scheduling_mode == NWD_MODE);
+ assert(current_locked.vcpu->scheduling_mode ==
+ NWD_MODE);
assert(target_vcpu->scheduling_mode == NWD_MODE);
/* Both must be part of the same call chain. */
- assert(is_predecessor_in_call_chain(current,
- target_vcpu));
+ assert(is_predecessor_in_call_chain(
+ current_locked, target_vcpu_locked));
break;
}
case VCPU_STATE_PREEMPTED:
@@ -1610,14 +1635,15 @@
}
static void plat_ffa_signal_secure_interrupt_sp(
- struct vcpu *current, struct vcpu_locked target_vcpu_locked,
- uint32_t id, struct vcpu **next, bool from_normal_world)
+ struct vcpu_locked current_locked,
+ struct vcpu_locked target_vcpu_locked, uint32_t id, struct vcpu **next,
+ bool from_normal_world)
{
if (target_vcpu_locked.vcpu->vm->el0_partition) {
plat_ffa_signal_secure_interrupt_sel0(target_vcpu_locked, id,
next);
} else {
- plat_ffa_signal_secure_interrupt_sel1(current,
+ plat_ffa_signal_secure_interrupt_sel1(current_locked,
target_vcpu_locked, id,
next, from_normal_world);
}
@@ -1632,13 +1658,17 @@
struct vcpu *current, struct vcpu **next)
{
struct vcpu_locked target_vcpu_locked;
+ struct vcpu_locked current_locked = {
+ .vcpu = current,
+ };
struct ffa_value ffa_ret = ffa_error(FFA_NOT_SUPPORTED);
uint32_t id;
bool is_el0_partition;
/* Secure interrupt triggered while execution is in SWd. */
CHECK(vm_id_is_current_world(current->vm->id));
- target_vcpu_locked = plat_ffa_secure_interrupt_prepare(current, &id);
+ target_vcpu_locked =
+ plat_ffa_secure_interrupt_prepare(current_locked, &id);
is_el0_partition = target_vcpu_locked.vcpu->vm->el0_partition;
if (current == target_vcpu_locked.vcpu && !is_el0_partition) {
@@ -1665,8 +1695,8 @@
*/
current->preempted_vcpu = NULL;
} else {
- plat_ffa_signal_secure_interrupt_sp(current, target_vcpu_locked,
- id, next, false);
+ plat_ffa_signal_secure_interrupt_sp(
+ current_locked, target_vcpu_locked, id, next, false);
/*
* In the scenario where target SP cannot be resumed for
@@ -1681,7 +1711,11 @@
target_vcpu_locked.vcpu->processing_secure_interrupt = true;
target_vcpu_locked.vcpu->current_sec_interrupt_id = id;
- vcpu_unlock(&target_vcpu_locked);
+
+ if (current != target_vcpu_locked.vcpu) {
+ vcpu_unlock(&target_vcpu_locked);
+ }
+ vcpu_unlock(&current_locked);
return ffa_ret;
}
@@ -1697,6 +1731,9 @@
struct ffa_value ffa_ret = ffa_error(FFA_NOT_SUPPORTED);
uint32_t id;
struct vcpu_locked target_vcpu_locked;
+ struct vcpu_locked current_locked = {
+ .vcpu = current,
+ };
/*
* A malicious SP could invoke a HVC call with FFA_INTERRUPT_32 as
@@ -1706,10 +1743,11 @@
return ffa_error(FFA_DENIED);
}
- target_vcpu_locked = plat_ffa_secure_interrupt_prepare(current, &id);
+ target_vcpu_locked =
+ plat_ffa_secure_interrupt_prepare(current_locked, &id);
- plat_ffa_signal_secure_interrupt_sp(current, target_vcpu_locked, id,
- next, true);
+ plat_ffa_signal_secure_interrupt_sp(current_locked, target_vcpu_locked,
+ id, next, true);
/*
* current refers to other world. target must be a vCPU in the secure
* world.
@@ -1730,6 +1768,7 @@
target_vcpu_locked.vcpu->preempted_vcpu = current;
}
vcpu_unlock(&target_vcpu_locked);
+ vcpu_unlock(&current_locked);
return ffa_ret;
}
@@ -1768,15 +1807,13 @@
* in SPMC that is processed by SPMD to make the world context switch. Refer
* FF-A v1.1 Beta0 section 14.4.
*/
-struct ffa_value plat_ffa_normal_world_resume(struct vcpu *current,
+struct ffa_value plat_ffa_normal_world_resume(struct vcpu_locked current_locked,
struct vcpu **next)
{
struct ffa_value ffa_ret = (struct ffa_value){.func = FFA_MSG_WAIT_32};
struct ffa_value other_world_ret =
(struct ffa_value){.func = FFA_NORMAL_WORLD_RESUME};
- struct vcpu_locked current_locked;
-
- current_locked = vcpu_lock(current);
+ struct vcpu *current = current_locked.vcpu;
/* Reset the fields tracking secure interrupt processing. */
plat_ffa_reset_secure_interrupt_flags(current_locked);
@@ -1786,12 +1823,10 @@
assert(current->call_chain.prev_node == NULL);
current->state = VCPU_STATE_WAITING;
- vcpu_unlock(&current_locked);
-
/* Restore interrupt priority mask. */
plat_interrupts_set_priority_mask(current->priority_mask);
- *next = api_switch_to_other_world(current, other_world_ret,
+ *next = api_switch_to_other_world(current_locked, other_world_ret,
VCPU_STATE_WAITING);
/* The next vCPU to be run cannot be null. */
@@ -1809,26 +1844,31 @@
*
* SPM then resumes the original SP that was initially pre-empted.
*/
-struct ffa_value plat_ffa_preempted_vcpu_resume(struct vcpu *current,
- struct vcpu **next)
+static struct ffa_value plat_ffa_preempted_vcpu_resume(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
struct ffa_value ffa_ret = (struct ffa_value){.func = FFA_MSG_WAIT_32};
struct vcpu *target_vcpu;
+ struct vcpu *current = current_locked.vcpu;
+ struct vcpu_locked target_locked;
struct two_vcpu_locked vcpus_locked;
CHECK(current->preempted_vcpu != NULL);
CHECK(current->preempted_vcpu->state == VCPU_STATE_PREEMPTED);
target_vcpu = current->preempted_vcpu;
+ vcpu_unlock(&current_locked);
- /* Lock both vCPUs at once. */
+ /* Lock both vCPUs at once to avoid deadlock. */
vcpus_locked = vcpu_lock_both(current, target_vcpu);
+ current_locked = vcpus_locked.vcpu1;
+ target_locked = vcpus_locked.vcpu2;
/* Reset the fields tracking secure interrupt processing. */
- plat_ffa_reset_secure_interrupt_flags(vcpus_locked.vcpu1);
+ plat_ffa_reset_secure_interrupt_flags(current_locked);
/* SPMC scheduled call chain is completely unwound. */
- plat_ffa_exit_spmc_schedule_mode(vcpus_locked.vcpu1);
+ plat_ffa_exit_spmc_schedule_mode(current_locked);
assert(current->call_chain.prev_node == NULL);
current->state = VCPU_STATE_WAITING;
@@ -1836,8 +1876,8 @@
/* Mark the registers as unavailable now. */
target_vcpu->regs_available = false;
- sl_unlock(&target_vcpu->lock);
- sl_unlock(&current->lock);
+
+ vcpu_unlock(&target_locked);
/* Restore interrupt priority mask. */
plat_interrupts_set_priority_mask(current->priority_mask);
@@ -1936,7 +1976,7 @@
}
bool plat_ffa_inject_notification_pending_interrupt(
- struct vcpu_locked target_locked, struct vcpu *current,
+ struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
struct vm *next_vm = target_locked.vcpu->vm;
@@ -1956,7 +1996,7 @@
!vm_notifications_is_npi_injected(receiver_locked)))) {
api_interrupt_inject_locked(target_locked,
HF_NOTIFICATION_PENDING_INTID,
- current, NULL);
+ current_locked, NULL);
vm_notifications_set_npi_injected(receiver_locked, true);
ret = true;
}
@@ -2020,10 +2060,11 @@
return true;
}
-static bool sp_boot_next(struct vcpu *current, struct vcpu **next)
+static bool sp_boot_next(struct vcpu_locked current_locked, struct vcpu **next)
{
static bool spmc_booted = false;
struct vcpu *vcpu_next = NULL;
+ struct vcpu *current = current_locked.vcpu;
if (spmc_booted) {
return false;
@@ -2154,21 +2195,18 @@
* Second, the intercepted direct response message is replayed followed by
* unwinding of the NWd scheduled call chain.
*/
-static struct ffa_value plat_ffa_resume_direct_response(struct vcpu *current,
- struct vcpu **next)
+static struct ffa_value plat_ffa_resume_direct_response(
+ struct vcpu_locked current_locked, struct vcpu **next)
{
ffa_vm_id_t receiver_vm_id;
+ struct vcpu *current = current_locked.vcpu;
struct ffa_value to_ret;
- struct vcpu_locked current_vcpu_locked;
-
- /* Lock current vCPU. */
- current_vcpu_locked = vcpu_lock(current);
/* Reset the fields tracking secure interrupt processing. */
- plat_ffa_reset_secure_interrupt_flags(current_vcpu_locked);
+ plat_ffa_reset_secure_interrupt_flags(current_locked);
/* SPMC scheduled call chain is completely unwound. */
- plat_ffa_exit_spmc_schedule_mode(current_vcpu_locked);
+ plat_ffa_exit_spmc_schedule_mode(current_locked);
/* Restore interrupt priority mask. */
plat_interrupts_set_priority_mask(current->priority_mask);
@@ -2187,10 +2225,9 @@
/* Clear direct request origin for the caller. */
current->direct_request_origin_vm_id = HF_INVALID_VM_ID;
- vcpu_unlock(&current_vcpu_locked);
- api_ffa_resume_direct_resp_target(current, next, receiver_vm_id, to_ret,
- true);
+ api_ffa_resume_direct_resp_target(current_locked, next, receiver_vm_id,
+ to_ret, true);
plat_ffa_vcpu_allow_interrupts(current);
@@ -2203,13 +2240,14 @@
* from RUNNING to WAITING for the following Partition runtime models:
* RTM_FFA_RUN, RTM_SEC_INTERRUPT, RTM_SP_INIT.
*/
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
struct vcpu **next)
{
struct ffa_value ret_args =
(struct ffa_value){.func = FFA_INTERRUPT_32};
+ struct vcpu *current = current_locked.vcpu;
- if (sp_boot_next(current, next)) {
+ if (sp_boot_next(current_locked, next)) {
return ret_args;
}
@@ -2233,16 +2271,18 @@
if (current->direct_resp_intercepted) {
assert(current->vm->el0_partition);
- return plat_ffa_resume_direct_response(current, next);
+ return plat_ffa_resume_direct_response(current_locked,
+ next);
}
/* Secure interrupt pre-empted normal world. */
if (current->preempted_vcpu->vm->id == HF_OTHER_WORLD_ID) {
- return plat_ffa_normal_world_resume(current, next);
+ return plat_ffa_normal_world_resume(current_locked,
+ next);
}
/* Secure interrupt pre-empted an SP. Resume it. */
- return plat_ffa_preempted_vcpu_resume(current, next);
+ return plat_ffa_preempted_vcpu_resume(current_locked, next);
}
/*
@@ -2258,14 +2298,12 @@
* The vCPU of an SP on secondary CPUs will invoke FFA_MSG_WAIT
* to indicate successful initialization to SPMC.
*/
- sl_lock(&current->lock);
current->scheduling_mode = NONE;
current->rt_model = RTM_NONE;
- sl_unlock(&current->lock);
/* Relinquish control back to the NWd. */
*next = api_switch_to_other_world(
- current, (struct ffa_value){.func = FFA_MSG_WAIT_32},
+ current_locked, (struct ffa_value){.func = FFA_MSG_WAIT_32},
VCPU_STATE_WAITING);
return ret_args;
@@ -2274,8 +2312,6 @@
struct vcpu *plat_ffa_unwind_nwd_call_chain_interrupt(struct vcpu *current_vcpu)
{
struct vcpu *next;
- struct vcpu_locked current_vcpu_locked;
- struct vcpu_locked next_vcpu_locked;
struct ffa_value ret = {
.func = FFA_INTERRUPT_32,
.arg1 = ffa_vm_vcpu(current_vcpu->vm->id,
@@ -2304,8 +2340,14 @@
next = current_vcpu->call_chain.prev_node;
CHECK(next != NULL);
+ /*
+ * Lock both vCPUs. Strictly speaking, it may not be necessary since
+ * next is guaranteed to be in BLOCKED state as it is the predecessor of
+ * the current vCPU in the present call chain.
+ */
+ vcpu_lock_both(current_vcpu, next);
+
/* Removing a node from an existing call chain. */
- current_vcpu_locked = vcpu_lock(current_vcpu);
current_vcpu->call_chain.prev_node = NULL;
current_vcpu->state = VCPU_STATE_PREEMPTED;
@@ -2316,10 +2358,6 @@
* are not changed here.
*/
- vcpu_unlock(&current_vcpu_locked);
-
- /* Lock next vcpu. */
- next_vcpu_locked = vcpu_lock(next);
assert(next->state == VCPU_STATE_BLOCKED);
next->state = VCPU_STATE_RUNNING;
assert(next->call_chain.next_node == current_vcpu);
@@ -2331,7 +2369,8 @@
/* Set the return value for the target VM. */
arch_regs_set_retval(&next->regs, ret);
- vcpu_unlock(&next_vcpu_locked);
+ sl_unlock(&next->lock);
+ sl_unlock(&current_vcpu->lock);
return next;
}
@@ -2369,14 +2408,11 @@
* Initialize the scheduling mode and/or Partition Runtime model of the target
* SP upon being resumed by an FFA_RUN ABI.
*/
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
struct vcpu_locked target_locked)
{
- struct vcpu_locked current_vcpu_locked;
struct vcpu *vcpu = target_locked.vcpu;
-
- /* Lock current vCPU now. */
- current_vcpu_locked = vcpu_lock(current);
+ struct vcpu *current = current_locked.vcpu;
/*
* Scenario 1 in Table 8.4; Therefore SPMC could be resuming a vCPU
@@ -2401,8 +2437,6 @@
}
plat_ffa_vcpu_queue_interrupts(target_locked);
-
- vcpu_unlock(&current_vcpu_locked);
}
/*
@@ -2429,7 +2463,7 @@
receiver_vcpu->scheduling_mode = NWD_MODE;
} else {
/* Adding a new node to an existing call chain. */
- vcpu_call_chain_extend(current, receiver_vcpu);
+ vcpu_call_chain_extend(current_locked, receiver_vcpu_locked);
receiver_vcpu->scheduling_mode = current->scheduling_mode;
}
plat_ffa_vcpu_queue_interrupts(receiver_vcpu_locked);
@@ -2439,13 +2473,12 @@
* Unwind the present call chain upon the invocation of
* FFA_MSG_SEND_DIRECT_RESP ABI.
*/
-void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
- struct vcpu *next)
+void plat_ffa_unwind_call_chain_ffa_direct_resp(
+ struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
+ struct vcpu *next = next_locked.vcpu;
ffa_vm_id_t receiver_vm_id = next->vm->id;
-
- /* Lock both vCPUs at once. */
- vcpu_lock_both(current, next);
+ struct vcpu *current = current_locked.vcpu;
assert(current->call_chain.next_node == NULL);
current->scheduling_mode = NONE;
@@ -2459,11 +2492,8 @@
assert(current->call_chain.prev_node == NULL);
} else {
/* Removing a node from an existing call chain. */
- vcpu_call_chain_remove_node(current, next);
+ vcpu_call_chain_remove_node(current_locked, next_locked);
}
-
- sl_unlock(&next->lock);
- sl_unlock(&current->lock);
}
static void plat_ffa_enable_virtual_maintenance_interrupts(
@@ -2602,9 +2632,8 @@
return ffa_error(FFA_INVALID_PARAMETERS);
}
-bool plat_ffa_is_direct_response_interrupted(struct vcpu *current)
+bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked)
{
- struct vcpu_locked current_locked;
bool ret;
/*
@@ -2618,7 +2647,7 @@
* a pending interrupt and allow it to be handled before sending the
* direct response.
*/
- current_locked = vcpu_lock(current);
+ struct vcpu *current = current_locked.vcpu;
/*
* An S-EL0 partition can handle virtual secure interrupt only in
@@ -2626,8 +2655,7 @@
* virtual interrupt during FFA_MSG_SEND_DIRECT_RESP invocation.
*/
if (current->vm->el0_partition) {
- ret = false;
- goto out;
+ return false;
}
/*
@@ -2656,8 +2684,7 @@
ret = false;
}
-out:
- vcpu_unlock(&current_locked);
+
return ret;
}
@@ -2680,12 +2707,13 @@
* execution context by the SPMC to handle secure virtual interrupt, then
* FFA_YIELD invocation is essentially a no-op.
*/
-struct ffa_value plat_ffa_yield_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
struct vcpu **next,
uint32_t timeout_low,
uint32_t timeout_high)
{
struct ffa_value ret_args = (struct ffa_value){.func = FFA_SUCCESS_32};
+ struct vcpu *current = current_locked.vcpu;
struct ffa_value ret = {
.func = FFA_YIELD_32,
.arg1 = ffa_vm_vcpu(current->vm->id, vcpu_index(current)),
@@ -2702,7 +2730,7 @@
* Relinquish cycles to the NWd VM that sent direct
* request message to the current SP.
*/
- *next = api_switch_to_other_world(current, ret,
+ *next = api_switch_to_other_world(current_locked, ret,
VCPU_STATE_BLOCKED);
} else {
/*
@@ -2710,7 +2738,7 @@
* message to the current SP.
*/
*next = api_switch_to_vm(
- current, ret, VCPU_STATE_BLOCKED,
+ current_locked, ret, VCPU_STATE_BLOCKED,
current->direct_request_origin_vm_id);
}
break;
@@ -2733,7 +2761,8 @@
}
default:
CHECK(current->rt_model == RTM_FFA_RUN);
- *next = api_switch_to_primary(current, ret, VCPU_STATE_BLOCKED);
+ *next = api_switch_to_primary(current_locked, ret,
+ VCPU_STATE_BLOCKED);
break;
}
diff --git a/src/arch/aarch64/plat/psci/spmc.c b/src/arch/aarch64/plat/psci/spmc.c
index d48224d..5152226 100644
--- a/src/arch/aarch64/plat/psci/spmc.c
+++ b/src/arch/aarch64/plat/psci/spmc.c
@@ -60,9 +60,12 @@
struct vcpu *plat_psci_cpu_resume(struct cpu *c)
{
struct vcpu_locked vcpu_locked;
+ struct vcpu_locked other_world_vcpu_locked;
struct vcpu *vcpu = vcpu_get_boot_vcpu();
struct vm *vm = vcpu->vm;
struct vm *other_world_vm;
+ struct vcpu *other_world_vcpu;
+ struct two_vcpu_locked vcpus_locked;
cpu_on(c);
@@ -78,10 +81,19 @@
vm_power_management_cpu_on_requested(vm) == false) {
other_world_vm = vm_find(HF_OTHER_WORLD_ID);
CHECK(other_world_vm != NULL);
+ other_world_vcpu = vm_get_vcpu(other_world_vm, cpu_index(c));
+ vcpu_unlock(&vcpu_locked);
+
+ /* Lock both vCPUs at once to avoid deadlock. */
+ vcpus_locked = vcpu_lock_both(vcpu, other_world_vcpu);
+ vcpu_locked = vcpus_locked.vcpu1;
+ other_world_vcpu_locked = vcpus_locked.vcpu2;
+
vcpu = api_switch_to_other_world(
- vm_get_vcpu(other_world_vm, cpu_index(c)),
+ other_world_vcpu_locked,
(struct ffa_value){.func = FFA_MSG_WAIT_32},
VCPU_STATE_WAITING);
+ vcpu_unlock(&other_world_vcpu_locked);
goto exit;
}
diff --git a/src/arch/fake/hypervisor/ffa.c b/src/arch/fake/hypervisor/ffa.c
index bee008c..d3de6f3 100644
--- a/src/arch/fake/hypervisor/ffa.c
+++ b/src/arch/fake/hypervisor/ffa.c
@@ -336,11 +336,11 @@
/**
* Check if current VM can resume target VM/SP using FFA_RUN ABI.
*/
-bool plat_ffa_run_checks(struct vcpu *current, ffa_vm_id_t target_vm_id,
- ffa_vcpu_index_t vcpu_idx, struct ffa_value *run_ret,
- struct vcpu **next)
+bool plat_ffa_run_checks(struct vcpu_locked current_locked,
+ ffa_vm_id_t target_vm_id, ffa_vcpu_index_t vcpu_idx,
+ struct ffa_value *run_ret, struct vcpu **next)
{
- (void)current;
+ (void)current_locked;
(void)target_vm_id;
(void)run_ret;
(void)next;
@@ -376,11 +376,11 @@
}
bool plat_ffa_inject_notification_pending_interrupt(
- struct vcpu_locked target_locked, struct vcpu *current,
+ struct vcpu_locked target_locked, struct vcpu_locked current_locked,
struct vm_locked receiver_locked)
{
(void)target_locked;
- (void)current;
+ (void)current_locked;
(void)receiver_locked;
return false;
@@ -418,38 +418,38 @@
return false;
}
-struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_msg_wait_prepare(struct vcpu_locked current_locked,
struct vcpu **next)
{
- (void)current;
+ (void)current_locked;
(void)next;
return (struct ffa_value){.func = FFA_INTERRUPT_32};
}
-bool plat_ffa_check_runtime_state_transition(struct vcpu *current,
+bool plat_ffa_check_runtime_state_transition(struct vcpu_locked current_locked,
ffa_vm_id_t vm_id,
ffa_vm_id_t receiver_vm_id,
- struct vcpu *receiver_vcpu,
+ struct vcpu_locked receiver_locked,
uint32_t func, // NOLINTNEXTLINE
enum vcpu_state *next_state)
{
/* Perform state transition checks only for Secure Partitions. */
- (void)current;
+ (void)current_locked;
(void)vm_id;
(void)receiver_vm_id;
- (void)receiver_vcpu;
+ (void)receiver_locked;
(void)func;
(void)next_state;
return true;
}
-void plat_ffa_init_schedule_mode_ffa_run(struct vcpu *current,
+void plat_ffa_init_schedule_mode_ffa_run(struct vcpu_locked current_locked,
struct vcpu_locked target_locked)
{
/* Scheduling mode not supported in the Hypervisor/VMs. */
- (void)current;
+ (void)current_locked;
(void)target_locked;
}
@@ -462,12 +462,12 @@
(void)receiver_vcpu_locked;
}
-void plat_ffa_unwind_call_chain_ffa_direct_resp(struct vcpu *current,
- struct vcpu *next)
+void plat_ffa_unwind_call_chain_ffa_direct_resp(
+ struct vcpu_locked current_locked, struct vcpu_locked next_locked)
{
/* Calls chains not supported in the Hypervisor/VMs. */
- (void)current;
- (void)next;
+ (void)current_locked;
+ (void)next_locked;
}
bool plat_ffa_intercept_direct_response(struct vcpu_locked current_locked,
@@ -494,9 +494,9 @@
(void)vm_locked;
}
-bool plat_ffa_is_direct_response_interrupted(struct vcpu *current)
+bool plat_ffa_is_direct_response_interrupted(struct vcpu_locked current_locked)
{
- (void)current;
+ (void)current_locked;
return false;
}
@@ -565,12 +565,12 @@
return ffa_error(FFA_NOT_SUPPORTED);
}
-struct ffa_value plat_ffa_yield_prepare(struct vcpu *current,
+struct ffa_value plat_ffa_yield_prepare(struct vcpu_locked current_locked,
struct vcpu **next,
uint32_t timeout_low,
uint32_t timeout_high)
{
- (void)current;
+ (void)current_locked;
(void)next;
(void)timeout_low;
(void)timeout_high;