Add support for notifying VMs when mailboxes become writable.
This allows VMs to know when they can retry sending messages to another
VM after an attempt fails because the recipient's mailbox is not
available.
Change-Id: Ib20159f7ecf81544e40149edcd663876c58851c7
diff --git a/driver/linux b/driver/linux
index e05702e..cd9fef9 160000
--- a/driver/linux
+++ b/driver/linux
@@ -1 +1 @@
-Subproject commit e05702ed670317545b11ee8e03f4616a029f6a18
+Subproject commit cd9fef9cbe9bcbbdea0f5a5cc96d79f17af81a16
diff --git a/inc/hf/api.h b/inc/hf/api.h
index f3e189a..8357a89 100644
--- a/inc/hf/api.h
+++ b/inc/hf/api.h
@@ -29,14 +29,16 @@
struct hf_vcpu_run_return api_vcpu_run(uint32_t vm_id, uint32_t vcpu_idx,
const struct vcpu *current,
struct vcpu **next);
-int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
- const struct vcpu *current);
+int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
+ struct vcpu **next);
int64_t api_mailbox_send(uint32_t vm_id, size_t size, bool notify,
struct vcpu *current, struct vcpu **next);
struct hf_mailbox_receive_return api_mailbox_receive(bool block,
struct vcpu *current,
struct vcpu **next);
-int64_t api_mailbox_clear(const struct vcpu *current);
+int64_t api_mailbox_clear(struct vcpu *current, struct vcpu **next);
+int64_t api_mailbox_writable_get(const struct vcpu *current);
+int64_t api_mailbox_waiter_get(uint32_t vm_id, const struct vcpu *current);
struct vcpu *api_preempt(struct vcpu *current);
struct vcpu *api_yield(struct vcpu *current);
diff --git a/inc/hf/vm.h b/inc/hf/vm.h
index 6a140b8..75096a5 100644
--- a/inc/hf/vm.h
+++ b/inc/hf/vm.h
@@ -17,6 +17,7 @@
#pragma once
#include "hf/cpu.h"
+#include "hf/list.h"
#include "hf/mm.h"
#include "hf/mpool.h"
@@ -31,6 +32,23 @@
mailbox_state_read,
};
+struct wait_entry {
+ /** The VM that is waiting for a mailbox to become writable. */
+ struct vm *waiting_vm;
+
+ /**
+ * Links used to add entry to a VM's waiter_list. This is protected by
+ * the notifying VM's lock.
+ */
+ struct list_entry wait_links;
+
+ /**
+ * Links used to add entry to a VM's ready_list. This is protected by
+ * the waiting VM's lock.
+ */
+ struct list_entry ready_links;
+};
+
struct mailbox {
enum mailbox_state state;
uint32_t recv_from_id;
@@ -38,6 +56,20 @@
void *recv;
const void *send;
struct vcpu *recv_waiter;
+
+ /**
+ * List of wait_entry structs representing VMs that want to be notified
+ * when the mailbox becomes writable. Once the mailbox does become
+ * writable, the entry is removed from this list and added to the
+ * waiting VM's ready_list.
+ */
+ struct list_entry waiter_list;
+
+ /**
+ * List of wait_entry structs representing VMs whose mailboxes became
+	 * writable since the owner of the mailbox registered for notification.
+ */
+ struct list_entry ready_list;
};
struct vm {
@@ -48,9 +80,19 @@
struct vcpu vcpus[MAX_CPUS];
struct mm_ptable ptable;
struct mailbox mailbox;
+
+ /** Wait entries to be used when waiting on other VM mailboxes. */
+ struct wait_entry wentry[MAX_VMS];
+};
+
+/** Encapsulates a VM whose lock is held. */
+struct vm_locked {
+ struct vm *vm;
};
bool vm_init(uint32_t vcpu_count, struct mpool *ppool, struct vm **new_vm);
uint32_t vm_get_count(void);
struct vm *vm_get(uint32_t id);
void vm_start_vcpu(struct vm *vm, size_t index, ipaddr_t entry, uintreg_t arg);
+void vm_lock(struct vm *vm, struct vm_locked *locked);
+void vm_unlock(struct vm_locked *locked);
diff --git a/inc/vmapi/hf/abi.h b/inc/vmapi/hf/abi.h
index 6f43360..451de41 100644
--- a/inc/vmapi/hf/abi.h
+++ b/inc/vmapi/hf/abi.h
@@ -64,6 +64,13 @@
* `hf_vcpu_run` on it again.
*/
HF_VCPU_RUN_SLEEP,
+
+ /**
+ * The vCPU has made the mailbox writable and there are pending waiters.
+ * The scheduler MUST call hf_mailbox_waiter_get() repeatedly and notify
+ * all waiters by injecting an HF_MAILBOX_WRITABLE_INTID interrupt.
+ */
+ HF_VCPU_RUN_NOTIFY_WAITERS,
};
struct hf_vcpu_run_return {
diff --git a/inc/vmapi/hf/call.h b/inc/vmapi/hf/call.h
index 3922f7a..8014f8c 100644
--- a/inc/vmapi/hf/call.h
+++ b/inc/vmapi/hf/call.h
@@ -23,18 +23,20 @@
/* clang-format off */
/* TODO: Define constants below according to spec. */
-#define HF_VM_GET_ID 0xff00
-#define HF_VM_GET_COUNT 0xff01
-#define HF_VCPU_GET_COUNT 0xff02
-#define HF_VCPU_RUN 0xff03
-#define HF_VCPU_YIELD 0xff04
-#define HF_VM_CONFIGURE 0xff05
-#define HF_MAILBOX_SEND 0xff06
-#define HF_MAILBOX_RECEIVE 0xff07
-#define HF_MAILBOX_CLEAR 0xff08
-#define HF_INTERRUPT_ENABLE 0xff09
-#define HF_INTERRUPT_GET 0xff0a
-#define HF_INTERRUPT_INJECT 0xff0b
+#define HF_VM_GET_ID 0xff00
+#define HF_VM_GET_COUNT 0xff01
+#define HF_VCPU_GET_COUNT 0xff02
+#define HF_VCPU_RUN 0xff03
+#define HF_VCPU_YIELD 0xff04
+#define HF_VM_CONFIGURE 0xff05
+#define HF_MAILBOX_SEND 0xff06
+#define HF_MAILBOX_RECEIVE 0xff07
+#define HF_MAILBOX_CLEAR 0xff08
+#define HF_MAILBOX_WRITABLE_GET 0xff09
+#define HF_MAILBOX_WAITER_GET 0xff0a
+#define HF_INTERRUPT_ENABLE 0xff0b
+#define HF_INTERRUPT_GET 0xff0c
+#define HF_INTERRUPT_INJECT 0xff0d
/** The amount of data that can be sent to a mailbox. */
#define HF_MAILBOX_SIZE 4096
@@ -95,7 +97,11 @@
* Configures the pages to send/receive data through. The pages must not be
* shared.
*
- * Returns 0 on success or -1 or failure.
+ * Returns:
+ * - -1 on failure.
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick waiters.
*/
static inline int64_t hf_vm_configure(hf_ipaddr_t send, hf_ipaddr_t recv)
{
@@ -137,8 +143,12 @@
/**
* Clears the caller's mailbox so a new message can be received.
*
- * Returns 0 on success, or -1 if the mailbox hasn't been read or is already
- * empty.
+ * Returns:
+ * - -1 on failure, if the mailbox hasn't been read or is already empty.
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick waiters. Waiters should be retrieved by calling
+ * hf_mailbox_waiter_get.
*/
static inline int64_t hf_mailbox_clear(void)
{
@@ -146,6 +156,34 @@
}
/**
+ * Retrieves the next VM whose mailbox became writable. For a VM to be notified
+ * by this function, the caller must have called api_mailbox_send before with
+ * the notify argument set to true, and this call must have failed because the
+ * mailbox was not available.
+ *
+ * It should be called repeatedly to retrieve a list of VMs.
+ *
+ * Returns -1 if no VM became writable, or the id of the VM whose mailbox
+ * became writable.
+ */
+static inline int64_t hf_mailbox_writable_get(void)
+{
+ return hf_call(HF_MAILBOX_WRITABLE_GET, 0, 0, 0);
+}
+
+/**
+ * Retrieves the next VM waiting to be notified that the mailbox of the
+ * specified VM became writable. Only primary VMs are allowed to call this.
+ *
+ * Returns -1 if there are no waiters, or the VM id of the next waiter
+ * otherwise.
+ */
+static inline int64_t hf_mailbox_waiter_get(uint32_t vm_id)
+{
+ return hf_call(HF_MAILBOX_WAITER_GET, vm_id, 0, 0);
+}
+
+/**
* Enables or disables a given interrupt ID.
*
* Returns 0 on success, or -1 if the intid is invalid.
diff --git a/inc/vmapi/hf/types.h b/inc/vmapi/hf/types.h
index 697a990..f0b39d9 100644
--- a/inc/vmapi/hf/types.h
+++ b/inc/vmapi/hf/types.h
@@ -45,3 +45,9 @@
/** Interrupt ID returned when there is no interrupt pending. */
#define HF_INVALID_INTID 0xffffffff
+
+/** Interrupt ID indicating the mailbox is readable. */
+#define HF_MAILBOX_READABLE_INTID 1
+
+/** Interrupt ID indicating a mailbox is writable. */
+#define HF_MAILBOX_WRITABLE_INTID 2
diff --git a/src/abi_test.cc b/src/abi_test.cc
index 11ab032..fc64547 100644
--- a/src/abi_test.cc
+++ b/src/abi_test.cc
@@ -211,4 +211,24 @@
EXPECT_THAT(res.size, Eq(0x8badf00d));
}
+/**
+ * Encode a 'notify waiters' response without leaking.
+ */
+TEST(abi, hf_vcpu_run_return_encode_notify_waiters)
+{
+ struct hf_vcpu_run_return res = dirty_vcpu_run_return();
+ res.code = HF_VCPU_RUN_NOTIFY_WAITERS;
+ EXPECT_THAT(hf_vcpu_run_return_encode(res), Eq(6));
+}
+
+/**
+ * Decode a 'notify waiters' response ignoring the irrelevant bits.
+ */
+TEST(abi, hf_vcpu_run_return_decode_notify_waiters)
+{
+ struct hf_vcpu_run_return res =
+ hf_vcpu_run_return_decode(0x1a1a1a1a2b2b2b06);
+ EXPECT_THAT(res.code, Eq(HF_VCPU_RUN_NOTIFY_WAITERS));
+}
+
} /* namespace */
diff --git a/src/api.c b/src/api.c
index 7341bfc..08f1e05 100644
--- a/src/api.c
+++ b/src/api.c
@@ -164,6 +164,28 @@
}
/**
+ * Retrieves the next waiter and removes it from the wait list if the VM's
+ * mailbox is in a writable state.
+ */
+static struct wait_entry *api_fetch_waiter(struct vm_locked locked_vm)
+{
+ struct wait_entry *entry;
+ struct vm *vm = locked_vm.vm;
+
+ if (vm->mailbox.state != mailbox_state_empty ||
+ vm->mailbox.recv == NULL || list_empty(&vm->mailbox.waiter_list)) {
+ /* The mailbox is not writable or there are no waiters. */
+ return NULL;
+ }
+
+ /* Remove waiter from the wait list. */
+ entry = CONTAINER_OF(vm->mailbox.waiter_list.next, struct wait_entry,
+ wait_links);
+ list_remove(&entry->wait_links);
+ return entry;
+}
+
+/**
* Prepares the vcpu to run by updating its state and fetching whether a return
* value needs to be forced onto the vCPU.
*/
@@ -281,13 +303,53 @@
}
/**
+ * Determines the value to be returned by api_vm_configure and api_mailbox_clear
+ * after they've succeeded. If a secondary VM is running and there are waiters,
+ * it also switches back to the primary VM for it to wake waiters up.
+ */
+static int64_t api_waiter_result(struct vm_locked locked_vm,
+ struct vcpu *current, struct vcpu **next)
+{
+ struct vm *vm = locked_vm.vm;
+ struct hf_vcpu_run_return ret = {
+ .code = HF_VCPU_RUN_NOTIFY_WAITERS,
+ };
+
+ if (list_empty(&vm->mailbox.waiter_list)) {
+ /* No waiters, nothing else to do. */
+ return 0;
+ }
+
+ if (vm->id == HF_PRIMARY_VM_ID) {
+ /* The caller is the primary VM. Tell it to wake up waiters. */
+ return 1;
+ }
+
+ /*
+ * Switch back to the primary VM, informing it that there are waiters
+ * that need to be notified.
+ */
+ *next = api_switch_to_primary(current, ret, vcpu_state_ready);
+
+ return 0;
+}
+
+/**
* Configures the VM to send/receive data through the specified pages. The pages
* must not be shared.
+ *
+ * Returns:
+ * - -1 on failure.
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick waiters. Waiters should be retrieved by calling
+ * hf_mailbox_waiter_get.
*/
-int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv,
- const struct vcpu *current)
+int64_t api_vm_configure(ipaddr_t send, ipaddr_t recv, struct vcpu *current,
+ struct vcpu **next)
{
struct vm *vm = current->vm;
+ struct vm_locked locked;
paddr_t pa_send_begin;
paddr_t pa_send_end;
paddr_t pa_recv_begin;
@@ -315,7 +377,7 @@
return -1;
}
- sl_lock(&vm->lock);
+ vm_lock(vm, &locked);
/* We only allow these to be setup once. */
if (vm->mailbox.send || vm->mailbox.recv) {
@@ -388,9 +450,8 @@
goto fail_undo_all;
}
- /* TODO: Notify any waiters. */
-
- ret = 0;
+ /* Tell caller about waiters, if any. */
+ ret = api_waiter_result(locked, current, next);
goto exit;
/*
@@ -416,7 +477,7 @@
ret = -1;
exit:
- sl_unlock(&vm->lock);
+ vm_unlock(&locked);
return ret;
}
@@ -437,8 +498,6 @@
uint16_t vcpu;
int64_t ret;
- (void)notify;
-
/* Limit the size of transfer. */
if (size > HF_MAILBOX_SIZE) {
return -1;
@@ -471,7 +530,20 @@
if (to->mailbox.state != mailbox_state_empty ||
to->mailbox.recv == NULL) {
- /* Fail if the target isn't currently ready to receive data. */
+ /*
+ * Fail if the target isn't currently ready to receive data,
+ * setting up for notification if requested.
+ */
+ if (notify) {
+ struct wait_entry *entry = ¤t->vm->wentry[vm_id];
+
+ /* Append waiter only if it's not there yet. */
+ if (list_empty(&entry->wait_links)) {
+ list_append(&to->mailbox.waiter_list,
+ &entry->wait_links);
+ }
+ }
+
ret = -1;
goto out;
}
@@ -612,27 +684,110 @@
}
/**
- * Clears the caller's mailbox so that a new message can be received. The caller
- * must have copied out all data they wish to preserve as new messages will
- * overwrite the old and will arrive asynchronously.
+ * Retrieves the next VM whose mailbox became writable. For a VM to be notified
+ * by this function, the caller must have called api_mailbox_send before with
+ * the notify argument set to true, and this call must have failed because the
+ * mailbox was not available.
+ *
+ * It should be called repeatedly to retrieve a list of VMs.
+ *
+ * Returns -1 if no VM became writable, or the id of the VM whose mailbox
+ * became writable.
*/
-int64_t api_mailbox_clear(const struct vcpu *current)
+int64_t api_mailbox_writable_get(const struct vcpu *current)
{
struct vm *vm = current->vm;
+ struct wait_entry *entry;
int64_t ret;
sl_lock(&vm->lock);
+ if (list_empty(&vm->mailbox.ready_list)) {
+ ret = -1;
+ goto exit;
+ }
+
+ entry = CONTAINER_OF(vm->mailbox.ready_list.next, struct wait_entry,
+ ready_links);
+ list_remove(&entry->ready_links);
+ ret = entry - vm->wentry;
+
+exit:
+ sl_unlock(&vm->lock);
+ return ret;
+}
+
+/**
+ * Retrieves the next VM waiting to be notified that the mailbox of the
+ * specified VM became writable. Only primary VMs are allowed to call this.
+ *
+ * Returns -1 if there are no waiters, or the VM id of the next waiter
+ * otherwise.
+ */
+int64_t api_mailbox_waiter_get(uint32_t vm_id, const struct vcpu *current)
+{
+ struct vm *vm;
+ struct vm_locked locked;
+ struct wait_entry *entry;
+ struct vm *waiting_vm;
+
+ /* Only primary VMs are allowed to call this function. */
+ if (current->vm->id != HF_PRIMARY_VM_ID) {
+ return -1;
+ }
+
+ vm = vm_get(vm_id);
+ if (vm == NULL) {
+ return -1;
+ }
+
+ /* Check if there are outstanding notifications from given vm. */
+ vm_lock(vm, &locked);
+ entry = api_fetch_waiter(locked);
+ vm_unlock(&locked);
+
+ if (entry == NULL) {
+ return -1;
+ }
+
+ /* Enqueue notification to waiting VM. */
+ waiting_vm = entry->waiting_vm;
+
+ sl_lock(&waiting_vm->lock);
+ if (list_empty(&entry->ready_links)) {
+ list_append(&waiting_vm->mailbox.ready_list,
+ &entry->ready_links);
+ }
+ sl_unlock(&waiting_vm->lock);
+
+ return waiting_vm->id;
+}
+
+/**
+ * Clears the caller's mailbox so that a new message can be received. The caller
+ * must have copied out all data they wish to preserve as new messages will
+ * overwrite the old and will arrive asynchronously.
+ *
+ * Returns:
+ * - -1 on failure, if the mailbox hasn't been read or is already empty.
+ * - 0 on success if no further action is needed.
+ * - 1 if it was called by the primary VM and the primary VM now needs to wake
+ * up or kick waiters. Waiters should be retrieved by calling
+ * hf_mailbox_waiter_get.
+ */
+int64_t api_mailbox_clear(struct vcpu *current, struct vcpu **next)
+{
+ struct vm *vm = current->vm;
+ struct vm_locked locked;
+ int64_t ret;
+
+ vm_lock(vm, &locked);
if (vm->mailbox.state == mailbox_state_read) {
- ret = 0;
+ ret = api_waiter_result(locked, current, next);
vm->mailbox.state = mailbox_state_empty;
} else {
ret = -1;
}
- sl_unlock(&vm->lock);
-
- if (ret == 0) {
- /* TODO: Notify waiters, if any. */
- }
+ vm_unlock(&locked);
return ret;
}
diff --git a/src/arch/aarch64/handler.c b/src/arch/aarch64/handler.c
index 9fbaa3c..1faef38 100644
--- a/src/arch/aarch64/handler.c
+++ b/src/arch/aarch64/handler.c
@@ -273,7 +273,7 @@
case HF_VM_CONFIGURE:
ret.user_ret = api_vm_configure(ipa_init(arg1), ipa_init(arg2),
- current());
+ current(), &ret.new);
break;
case HF_MAILBOX_SEND:
@@ -287,7 +287,15 @@
break;
case HF_MAILBOX_CLEAR:
- ret.user_ret = api_mailbox_clear(current());
+ ret.user_ret = api_mailbox_clear(current(), &ret.new);
+ break;
+
+ case HF_MAILBOX_WRITABLE_GET:
+ ret.user_ret = api_mailbox_writable_get(current());
+ break;
+
+ case HF_MAILBOX_WAITER_GET:
+ ret.user_ret = api_mailbox_waiter_get(arg1, current());
break;
case HF_INTERRUPT_ENABLE:
diff --git a/src/vm.c b/src/vm.c
index 40aa294..9bc616f 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -38,6 +38,10 @@
memset(vm, 0, sizeof(*vm));
+ list_init(&vm->mailbox.waiter_list);
+ list_init(&vm->mailbox.ready_list);
+ sl_init(&vm->lock);
+
vm->id = vm_count;
vm->vcpu_count = vcpu_count;
vm->mailbox.state = mailbox_state_empty;
@@ -46,6 +50,13 @@
return false;
}
+ /* Initialise waiter entries. */
+ for (i = 0; i < MAX_VMS; i++) {
+ vm->wentry[i].waiting_vm = vm;
+ list_init(&vm->wentry[i].wait_links);
+ list_init(&vm->wentry[i].ready_links);
+ }
+
/* Do basic initialization of vcpus. */
for (i = 0; i < vcpu_count; i++) {
vcpu_init(&vm->vcpus[i], vm);
@@ -72,6 +83,25 @@
return &vms[id];
}
+/**
+ * Locks the given VM and updates `locked` to hold the newly locked vm.
+ */
+void vm_lock(struct vm *vm, struct vm_locked *locked)
+{
+ sl_lock(&vm->lock);
+ locked->vm = vm;
+}
+
+/**
+ * Unlocks a VM previously locked with vm_lock, and updates `locked` to reflect
+ * the fact that the VM is no longer locked.
+ */
+void vm_unlock(struct vm_locked *locked)
+{
+ sl_unlock(&locked->vm->lock);
+ locked->vm = NULL;
+}
+
/* TODO: Shall we use index or id here? */
void vm_start_vcpu(struct vm *vm, size_t index, ipaddr_t entry, uintreg_t arg)
{