Use VM ID offset for wait entries to avoid overflow.
Change-Id: I52a1c4a69eb65d7bd7b63988057aa7d4c254ea1e
diff --git a/src/vm.c b/src/vm.c
index da2460b..3d71c51 100644
--- a/src/vm.c
+++ b/src/vm.c
@@ -149,6 +149,30 @@
}
/**
+ * Gets `vm`'s wait entry for waiting on the VM with ID `for_vm`.
+ */
+struct wait_entry *vm_get_wait_entry(struct vm *vm, spci_vm_id_t for_vm)
+{
+ uint16_t index;
+
+ CHECK(for_vm >= HF_VM_ID_OFFSET);
+ index = for_vm - HF_VM_ID_OFFSET;
+ CHECK(index < MAX_VMS);
+
+ return &vm->wait_entries[index];
+}
+
+/**
+ * Gets the ID of the VM which the given VM's wait entry is for.
+ */
+spci_vm_id_t vm_id_for_wait_entry(struct vm *vm, struct wait_entry *entry)
+{
+ uint16_t index = entry - vm->wait_entries;
+
+ return index + HF_VM_ID_OFFSET;
+}
+
+/**
* Map a range of addresses to the VM in both the MMU and the IOMMU.
*
* mm_vm_defrag should always be called after a series of page table updates,