Ensure that register state is saved before it's used.

Without this, there is a race in which one physical CPU is still saving
the contents of the registers into a vCPU buffer while another physical
CPU is already trying to restore that state.

Also add a test case that fails without this fix and succeeds with it.
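
For reference, a minimal standalone sketch of the handoff this change
introduces; the types, the toy C11 spinlock and the function names below
are simplified stand-ins rather than the real Hafnium structures:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct spinlock {
        atomic_flag flag;
    };

    void sl_lock(struct spinlock *l)
    {
        /* Spin until the flag is acquired. */
        while (atomic_flag_test_and_set(&l->flag)) {
        }
    }

    void sl_unlock(struct spinlock *l)
    {
        atomic_flag_clear(&l->flag);
    }

    struct vcpu {
        struct spinlock lock;
        bool regs_available;
    };

    /* Saving pcpu: publish the registers only once they are fully written. */
    void regs_saved(struct vcpu *vcpu)
    {
        sl_lock(&vcpu->lock);
        vcpu->regs_available = true;
        sl_unlock(&vcpu->lock);
    }

    /* Running pcpu: wait for the save to finish, then take ownership. */
    void regs_acquire(struct vcpu *vcpu)
    {
        sl_lock(&vcpu->lock);
        while (!vcpu->regs_available) {
            /* Drop the lock so the saving pcpu can make progress. */
            sl_unlock(&vcpu->lock);
            sl_lock(&vcpu->lock);
        }
        vcpu->regs_available = false;
        sl_unlock(&vcpu->lock);
    }

Releasing the lock inside the wait loop matters: the saving pcpu must take
the same lock to set regs_available, so spinning with the lock held would
deadlock.
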
Change-Id: I0badfd61f12385a3d8dc8d09eb298cfe56f2a415
diff --git a/src/api.c b/src/api.c
index 4ab7416..6be8702 100644
--- a/src/api.c
+++ b/src/api.c
@@ -129,6 +129,66 @@
}

/**
+ * This function is called by the architecture-specific context-switching
+ * function to indicate that register state for the given vcpu has been saved
+ * and can therefore be used by other pcpus.
+ */
+void api_regs_state_saved(struct vcpu *vcpu)
+{
+	sl_lock(&vcpu->lock);
+	vcpu->regs_available = true;
+	sl_unlock(&vcpu->lock);
+}
+
+/**
+ * Prepares the vcpu to run by updating its state and fetching any return
+ * value that needs to be forced onto it before it resumes.
+ */
+static bool api_vcpu_prepare_run(const struct vcpu *current, struct vcpu *vcpu,
+				 struct retval_state *vcpu_retval)
+{
+	bool ret;
+
+	sl_lock(&vcpu->lock);
+	if (vcpu->state != vcpu_state_ready) {
+		ret = false;
+		goto out;
+	}
+
+	vcpu->cpu = current->cpu;
+	vcpu->state = vcpu_state_running;
+
+	/* Fetch return value to inject into vCPU if there is one. */
+	*vcpu_retval = vcpu->retval;
+	if (vcpu_retval->force) {
+		vcpu->retval.force = false;
+	}
+
+	/*
+	 * Wait until the registers become available. The lock is dropped and
+	 * retaken each iteration so the pcpu still saving them can set
+	 * regs_available. To avoid deadlock, no other locks may be held here.
+	 */
+	while (!vcpu->regs_available) {
+		sl_unlock(&vcpu->lock);
+		sl_lock(&vcpu->lock);
+	}
+
+	/*
+	 * Mark the registers as unavailable now that this pcpu is about to
+	 * load them onto the physical registers; this also prevents another
+	 * pcpu from trying to read them until they have been saved again.
+	 */
+	vcpu->regs_available = false;
+
+	ret = true;
+
+out:
+	sl_unlock(&vcpu->lock);
+	return ret;
+}
+
+/**
* Runs the given vcpu of the given vm.
*/
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
@@ -137,6 +197,7 @@
{
	struct vm *vm;
	struct vcpu *vcpu;
+	struct retval_state vcpu_retval;
	struct hf_vcpu_run_return ret = {
		.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT,
	};
@@ -162,18 +223,20 @@
		goto out;
	}

+	/* Update state if allowed. */
	vcpu = &vm->vcpus[vcpu_idx];
-
-	sl_lock(&vcpu->lock);
-	if (vcpu->state != vcpu_state_ready) {
+	if (!api_vcpu_prepare_run(current, vcpu, &vcpu_retval)) {
		ret.code = HF_VCPU_RUN_WAIT_FOR_INTERRUPT;
-	} else {
-		vcpu->cpu = current->cpu;
-		vcpu->state = vcpu_state_running;
-		*next = vcpu;
-		ret.code = HF_VCPU_RUN_YIELD;
+		goto out;
	}
-	sl_unlock(&vcpu->lock);
+
+	*next = vcpu;
+	ret.code = HF_VCPU_RUN_YIELD;
+
+	/* Inject the return value if one needs to be forced. */
+	if (vcpu_retval.force) {
+		arch_regs_set_retval(&vcpu->regs, vcpu_retval.value);
+	}

out:
	return ret;
@@ -353,12 +416,12 @@
	to_vcpu->state = vcpu_state_ready;
	/* Return from HF_MAILBOX_RECEIVE. */
-	arch_regs_set_retval(&to_vcpu->regs,
-			     hf_mailbox_receive_return_encode((
-				     struct hf_mailbox_receive_return){
-				     .vm_id = to->mailbox.recv_from_id,
-				     .size = size,
-			     }));
+	to_vcpu->retval.force = true;
+	to_vcpu->retval.value = hf_mailbox_receive_return_encode(
+		(struct hf_mailbox_receive_return){
+			.vm_id = to->mailbox.recv_from_id,
+			.size = size,
+		});
	sl_unlock(&to_vcpu->lock);