Invalidate TLB if necessary when switching vCPUs.
Bug: 132422393
Change-Id: If5983b9c39c92604a75ea42839b73376577a4f65
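
Note: maybe_invalidate_tlb() below keys off vcpu->vm->arch.last_vcpu_on_cpu, a per-VM record of the last vCPU of that VM run on each physical CPU. That field is added to the arch-specific VM state elsewhere in this change; the sketch below only illustrates the kind of bookkeeping assumed, with MAX_CPUS, the sentinel value and the init function name invented for illustration rather than taken from this patch.

#include <stddef.h>
#include <stdint.h>

#define MAX_CPUS 8 /* illustrative bound on physical CPUs */

/* Sentinel meaning "this VM has not yet run on that pCPU". */
#define INVALID_VCPU_INDEX SIZE_MAX

struct arch_vm {
	/* Index of the last vCPU of this VM run on each physical CPU. */
	size_t last_vcpu_on_cpu[MAX_CPUS];
};

static void arch_vm_init_tlb_tracking(struct arch_vm *arch)
{
	size_t i;

	/*
	 * Start from a value no real vCPU index can match, so the first run
	 * of any vCPU on a given pCPU always invalidates the TLB.
	 */
	for (i = 0; i < MAX_CPUS; i++) {
		arch->last_vcpu_on_cpu[i] = INVALID_VCPU_INDEX;
	}
}

With initialization along these lines, the comparison in maybe_invalidate_tlb() can never spuriously match the first time a VM runs on a given pCPU, so that first run always invalidates the TLB, which errs on the safe side.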
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index c580c1a..7de6ad3 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -16,6 +16,7 @@
#include <stdnoreturn.h>
+#include "hf/arch/barriers.h"
#include "hf/arch/init.h"
#include "hf/api.h"
@@ -125,6 +126,55 @@
}
}
+/**
+ * Ensures all explicit memory access and management instructions for
+ * non-shareable normal memory have completed before continuing.
+ */
+static void dsb_nsh(void)
+{
+	__asm__ volatile("dsb nsh");
+}
+
+/**
+ * Invalidates all stage 1 TLB entries on the current (physical) CPU for the
+ * current VMID.
+ */
+static void invalidate_vm_tlb(void)
+{
+	isb();
+	__asm__ volatile("tlbi vmalle1");
+	isb();
+	dsb_nsh();
+}
+
+/**
+ * Invalidates the TLB if the vCPU being run is different from the last vCPU
+ * of the same VM that ran on the current pCPU.
+ *
+ * This is necessary because VMs may (contrary to the architecture
+ * specification) use inconsistent ASIDs across vCPUs. See KVM's similar
+ * workaround:
+ * https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=94d0e5980d6791b9
+ */
+void maybe_invalidate_tlb(struct vcpu *vcpu)
+{
+	size_t current_cpu_index = cpu_index(vcpu->cpu);
+	size_t new_vcpu_index = vcpu_index(vcpu);
+
+	if (vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] !=
+	    new_vcpu_index) {
+		/*
+		 * The vCPU has changed since the last time this VM was run on
+		 * this pCPU, so we need to invalidate the TLB.
+		 */
+		invalidate_vm_tlb();
+
+		/* Record the fact that this vCPU is now running on this CPU. */
+		vcpu->vm->arch.last_vcpu_on_cpu[current_cpu_index] =
+			new_vcpu_index;
+	}
+}
+
noreturn void irq_current_exception(uintreg_t elr, uintreg_t spsr)
{
(void)elr;