VHE: Move hcr_el2 from lazy saved region to volatile region.
This patch moves hcr_el2 from the list of registers that are saved
lazily to the volatile region, so that it is saved and restored on every
entry to and exit from EL2. This is to prepare for future changes that
require enabling hcr_el2.TGE at every entry into EL2 so that Hafnium can
execute in "host" mode when FEAT_VHE is enabled and available.
Change-Id: I1e4570ee07df70eb1b608ad8b26a791bf9a50f2a
Signed-off-by: Raghu Krishnamurthy <raghu.ncstate@gmail.com>
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index b71efe0..eeb9eb1 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -99,7 +99,7 @@
}
}
- r->lazy.hcr_el2 = get_hcr_el2_value(vm_id);
+ r->hcr_el2 = get_hcr_el2_value(vm_id);
r->lazy.cnthctl_el2 = cnthctl;
r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
r->lazy.vmpidr_el2 = vcpu_id;
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 6239c60..9a39031 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -51,6 +51,8 @@
mrs x1, elr_el2
mrs x2, spsr_el2
stp x1, x2, [x18, #VCPU_REGS + 8 * 31]
+ mrs x1, hcr_el2
+ str x1, [x18, #VCPU_REGS + 8 * 33]
.endm
/**
@@ -346,10 +348,6 @@
mrs x23, sp_el1
stp x22, x23, [x28], #16
- mrs x24, par_el1
- mrs x25, hcr_el2
- stp x24, x25, [x28], #16
-
mrs x26, cnthctl_el2
mrs x27, vttbr_el2
stp x26, x27, [x28], #16
@@ -366,6 +364,9 @@
mrs x9, pmintenset_el1
stp x8, x9, [x28], #16
+ mrs x8, par_el1
+ str x8, [x28], #8
+
#if BRANCH_PROTECTION
add x2, x1, #(VCPU_PAC + 16)
mrs x10, APIBKEYLO_EL1
@@ -680,10 +681,6 @@
msr sp_el0, x22
msr sp_el1, x23
- ldp x24, x25, [x28], #16
- msr par_el1, x24
- msr hcr_el2, x25
-
ldp x26, x27, [x28], #16
msr cnthctl_el2, x26
msr vttbr_el2, x27
@@ -716,6 +713,9 @@
msr pmintenclr_el1, x27
msr pmintenset_el1, x9
+ ldr x8, [x28], #8
+ msr par_el1, x8
+
#if BRANCH_PROTECTION
add x2, x0, #(VCPU_PAC + 16)
ldp x10, x11, [x2], #16
@@ -793,6 +793,9 @@
msr elr_el2, x1
msr spsr_el2, x2
+ ldr x1, [x0, #VCPU_REGS + 8 * 33]
+ msr hcr_el2, x1
+
/* Restore x0..x3, which we have used as scratch before. */
ldp x2, x3, [x0, #VCPU_REGS + 8 * 2]
ldp x0, x1, [x0, #VCPU_REGS + 8 * 0]
diff --git a/src/arch/aarch64/hypervisor/feature_id.c b/src/arch/aarch64/hypervisor/feature_id.c
index 8f3c95d..ed3bf8f 100644
--- a/src/arch/aarch64/hypervisor/feature_id.c
+++ b/src/arch/aarch64/hypervisor/feature_id.c
@@ -175,7 +175,7 @@
~(ID_AA64MMFR1_EL1_VH_MASK << ID_AA64MMFR1_EL1_VH_SHIFT);
if (features & HF_FEATURE_RAS) {
- regs->lazy.hcr_el2 |= HCR_EL2_TERR;
+ regs->hcr_el2 |= HCR_EL2_TERR;
vm->arch.tid3_masks.id_aa64mmfr1_el1 &=
~ID_AA64MMFR1_EL1_SPEC_SEI;
vm->arch.tid3_masks.id_aa64pfr0_el1 &= ~ID_AA64PFR0_EL1_RAS;
@@ -221,14 +221,14 @@
}
if (features & HF_FEATURE_LOR) {
- regs->lazy.hcr_el2 |= HCR_EL2_TLOR;
+ regs->hcr_el2 |= HCR_EL2_TLOR;
vm->arch.tid3_masks.id_aa64mmfr1_el1 &= ~ID_AA64MMFR1_EL1_LO;
}
if (features & HF_FEATURE_PAUTH) {
/* APK and API bits *enable* trapping when cleared. */
- regs->lazy.hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
+ regs->hcr_el2 &= ~(HCR_EL2_APK | HCR_EL2_API);
vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPI;
vm->arch.tid3_masks.id_aa64isar1_el1 &= ~ID_AA64ISAR1_EL1_GPA;
diff --git a/src/arch/aarch64/hypervisor/handler.c b/src/arch/aarch64/hypervisor/handler.c
index 368b38d..414167f 100644
--- a/src/arch/aarch64/hypervisor/handler.c
+++ b/src/arch/aarch64/hypervisor/handler.c
@@ -260,9 +260,9 @@
static void set_virtual_irq(struct arch_regs *r, bool enable)
{
if (enable) {
- r->lazy.hcr_el2 |= HCR_EL2_VI;
+ r->hcr_el2 |= HCR_EL2_VI;
} else {
- r->lazy.hcr_el2 &= ~HCR_EL2_VI;
+ r->hcr_el2 &= ~HCR_EL2_VI;
}
}
@@ -271,14 +271,14 @@
*/
static void set_virtual_irq_current(bool enable)
{
- uintreg_t hcr_el2 = read_msr(hcr_el2);
+ uintreg_t hcr_el2 = current()->regs.hcr_el2;
if (enable) {
hcr_el2 |= HCR_EL2_VI;
} else {
hcr_el2 &= ~HCR_EL2_VI;
}
- write_msr(hcr_el2, hcr_el2);
+ current()->regs.hcr_el2 = hcr_el2;
}
/**
@@ -288,9 +288,9 @@
static void set_virtual_fiq(struct arch_regs *r, bool enable)
{
if (enable) {
- r->lazy.hcr_el2 |= HCR_EL2_VF;
+ r->hcr_el2 |= HCR_EL2_VF;
} else {
- r->lazy.hcr_el2 &= ~HCR_EL2_VF;
+ r->hcr_el2 &= ~HCR_EL2_VF;
}
}
@@ -299,14 +299,14 @@
*/
static void set_virtual_fiq_current(bool enable)
{
- uintreg_t hcr_el2 = read_msr(hcr_el2);
+ uintreg_t hcr_el2 = current()->regs.hcr_el2;
if (enable) {
hcr_el2 |= HCR_EL2_VF;
} else {
hcr_el2 &= ~HCR_EL2_VF;
}
- write_msr(hcr_el2, hcr_el2);
+ current()->regs.hcr_el2 = hcr_el2;
}
#if SECURE_WORLD == 1
diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h
index b10e45e..a8cdf46 100644
--- a/src/arch/aarch64/inc/hf/arch/types.h
+++ b/src/arch/aarch64/inc/hf/arch/types.h
@@ -79,6 +79,7 @@
uintreg_t r[NUM_GP_REGS];
uintreg_t pc;
uintreg_t spsr;
+ uintreg_t hcr_el2;
/*
* System registers.
@@ -114,8 +115,6 @@
uintreg_t tpidr_el1;
uintreg_t sp_el0;
uintreg_t sp_el1;
- uintreg_t par_el1;
- uintreg_t hcr_el2;
uintreg_t cnthctl_el2;
uintreg_t vttbr_el2;
uintreg_t mdcr_el2;
@@ -124,6 +123,7 @@
uintreg_t pmcr_el0;
uintreg_t pmcntenset_el0;
uintreg_t pmintenset_el1;
+ uintreg_t par_el1;
} lazy;
/* Floating point registers. */