refactor: move memory config to vCPU context
This change is motivated by [1], where EL2 registers accessible only
from the secure world are no longer saved/restored as part of the EL3
context switch sequence. This mainly concerns VSTCR_EL2/VSTTBR_EL2.
From the Hafnium (SPMC) perspective, VTCR_EL2/VSTCR_EL2 are statically
configured once and for all at boot time. Given that VSTCR_EL2 is no
longer maintained as part of the EL3 saved state, its contents can be
lost upon a core resume from suspend.
This change moves VTCR_EL2/VTTBR_EL2/VSTCR_EL2/VSTTBR_EL2 into the
(S-)EL2 vCPU context switch sequence. These registers become VM/vCPU
properties rather than the SPMC's.
In effect, VTCR_EL2/VSTCR_EL2 (the non-secure and secure IPA space
configurations) hold the same values across all vCPUs, whichever the
VM. VTTBR_EL2/VSTTBR_EL2 hold the same values for all vCPUs within a
VM (they point to the VM's non-secure/secure IPA root page tables).
Presently this address is the same for both IPA spaces; a future
change will split VTTBR_EL2/VSTTBR_EL2 into different sets of page
tables.
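For illustration, the per-VM stage 2 base is composed from the IPA
root table address and the VM id, as in the cpu.c hunk below. A
minimal sketch, assuming a 16-bit VMID held in VTTBR_EL2 bits [63:48]
(the make_vttbr helper name is hypothetical):

  #include <stdint.h>

  /* Compose a per-VM VTTBR_EL2 value: root page table base (BADDR)
   * in the low bits, VM id as VMID in bits [63:48]. VSTTBR_EL2
   * carries the base address only, as it has no VMID field. */
  static inline uint64_t make_vttbr(uint64_t table_pa, uint16_t vm_id)
  {
          return table_pa | ((uint64_t)vm_id << 48);
  }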
Note that the non-secure VTCR_EL2/VTTBR_EL2 are kept in the EL3
sequence but are also added to the vCPU context for the sake of
completeness; the latter may not be strictly required.
[1] https://review.trustedfirmware.org/c/TF-A/trusted-firmware-a/+/13803
Change-Id: I9a85ab106a81086be07fd93088d6501cae83c43f
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/inc/hf/arch/mm.h b/inc/hf/arch/mm.h
index 043c564..83dabce 100644
--- a/inc/hf/arch/mm.h
+++ b/inc/hf/arch/mm.h
@@ -184,3 +184,7 @@
* Returns the maximum supported PA Range in bits.
*/
uint32_t arch_mm_get_pa_range(void);
+
+uintptr_t arch_mm_get_vtcr_el2(void);
+
+uintptr_t arch_mm_get_vstcr_el2(void);
diff --git a/src/arch/aarch64/hypervisor/cpu.c b/src/arch/aarch64/hypervisor/cpu.c
index 05f422e..c6cebdd 100644
--- a/src/arch/aarch64/hypervisor/cpu.c
+++ b/src/arch/aarch64/hypervisor/cpu.c
@@ -115,7 +115,12 @@
r->spsr = PSR_PE_MODE_EL0T;
} else {
r->ttbr0_el2 = read_msr(ttbr0_el2);
+ r->lazy.vtcr_el2 = arch_mm_get_vtcr_el2();
r->lazy.vttbr_el2 = pa_addr(table) | ((uint64_t)vm_id << 48);
+#if SECURE_WORLD == 1
+ r->lazy.vstcr_el2 = arch_mm_get_vstcr_el2();
+ r->lazy.vsttbr_el2 = pa_addr(table);
+#endif
r->lazy.vmpidr_el2 = vcpu_id;
/* Mask (disable) interrupts and run in EL1h mode. */
r->spsr = PSR_D | PSR_A | PSR_I | PSR_F | PSR_PE_MODE_EL1H;
diff --git a/src/arch/aarch64/hypervisor/exceptions.S b/src/arch/aarch64/hypervisor/exceptions.S
index 61e638d..1861404 100644
--- a/src/arch/aarch64/hypervisor/exceptions.S
+++ b/src/arch/aarch64/hypervisor/exceptions.S
@@ -356,9 +356,17 @@
mrs x23, sp_el1
stp x22, x23, [x28], #16
- mrs x26, cnthctl_el2
- mrs x27, vttbr_el2
+ mrs x24, vtcr_el2
+ mrs x25, vttbr_el2
+ stp x24, x25, [x28], #16
+
+#if SECURE_WORLD == 1
+ mrs x26, MSR_VSTCR_EL2
+ mrs x27, MSR_VSTTBR_EL2
stp x26, x27, [x28], #16
+#else
+ stp xzr, xzr, [x28], #16
+#endif
mrs x4, mdcr_el2
mrs x5, mdscr_el1
@@ -372,8 +380,9 @@
mrs x9, pmintenset_el1
stp x8, x9, [x28], #16
- mrs x8, par_el1
- str x8, [x28], #8
+ mrs x10, cnthctl_el2
+ mrs x11, par_el1
+ stp x10, x11, [x28], #16
#if BRANCH_PROTECTION
add x2, x1, #(VCPU_PAC + 16)
@@ -696,11 +705,13 @@
msr sp_el0, x22
msr sp_el1, x23
- ldp x26, x27, [x28], #16
- msr cnthctl_el2, x26
- msr vttbr_el2, x27
+ ldp x24, x25, [x28], #16
+ msr vtcr_el2, x24
+ msr vttbr_el2, x25
+ ldp x26, x27, [x28], #16
#if SECURE_WORLD == 1
+ msr MSR_VSTCR_EL2, x26
msr MSR_VSTTBR_EL2, x27
#endif
@@ -728,8 +739,9 @@
msr pmintenclr_el1, x27
msr pmintenset_el1, x9
- ldr x8, [x28], #8
- msr par_el1, x8
+ ldp x10, x11, [x28], #16
+ msr cnthctl_el2, x10
+ msr par_el1, x11
#if BRANCH_PROTECTION
add x2, x0, #(VCPU_PAC + 16)
diff --git a/src/arch/aarch64/hypervisor/hypervisor_entry.S b/src/arch/aarch64/hypervisor/hypervisor_entry.S
index a6b89a4..eae9b03 100644
--- a/src/arch/aarch64/hypervisor/hypervisor_entry.S
+++ b/src/arch/aarch64/hypervisor/hypervisor_entry.S
@@ -191,27 +191,21 @@
adrp x7, arch_mm_config
add x7, x7, :lo12:arch_mm_config
- ldp x1, x2, [x7]
- ldp x3, x4, [x7, #16]
- ldp x5, x6, [x7, #32]
- ldr x7, [x7, #48]
+ ldp x1, x2, [x7] /* x1: ttbr0_el2, x2: mair_el2 */
+ ldp x3, x4, [x7, #16] /* x3: tcr_el2, x4: sctlr_el2 */
+ ldp x5, xzr, [x7, #32] /* x5: hcr_el2 */
/*
* Set hcr_el2 before tcr_el2, since hcr_el2.e2h may be set, which changes
* the definition of tcr_el2.
*/
- msr hcr_el2, x7
+ msr hcr_el2, x5
isb
msr ttbr0_el2, x1
- msr vtcr_el2, x2
-#if SECURE_WORLD == 1
- msr MSR_VSTCR_EL2, x6
-#endif
-
- msr mair_el2, x3
- msr tcr_el2, x4
+ msr mair_el2, x2
+ msr tcr_el2, x3
/* Ensure everything before this point has completed. */
dsb sy
@@ -236,6 +230,6 @@
* Configure sctlr_el2 to enable MMU and cache and don't proceed until
* this has completed.
*/
- msr sctlr_el2, x5
+ msr sctlr_el2, x4
isb
ret
diff --git a/src/arch/aarch64/inc/hf/arch/types.h b/src/arch/aarch64/inc/hf/arch/types.h
index 0248522..96e4819 100644
--- a/src/arch/aarch64/inc/hf/arch/types.h
+++ b/src/arch/aarch64/inc/hf/arch/types.h
@@ -116,14 +116,17 @@
uintreg_t tpidr_el1;
uintreg_t sp_el0;
uintreg_t sp_el1;
- uintreg_t cnthctl_el2;
+ uintreg_t vtcr_el2;
uintreg_t vttbr_el2;
+ uintreg_t vstcr_el2;
+ uintreg_t vsttbr_el2;
uintreg_t mdcr_el2;
uintreg_t mdscr_el1;
uintreg_t pmccfiltr_el0;
uintreg_t pmcr_el0;
uintreg_t pmcntenset_el0;
uintreg_t pmintenset_el1;
+ uintreg_t cnthctl_el2;
uintreg_t par_el1;
} lazy;
diff --git a/src/arch/aarch64/mm.c b/src/arch/aarch64/mm.c
index 0834a48..d2307e2 100644
--- a/src/arch/aarch64/mm.c
+++ b/src/arch/aarch64/mm.c
@@ -134,12 +134,12 @@
*/
struct arch_mm_config {
uintreg_t ttbr0_el2;
- uintreg_t vtcr_el2;
uintreg_t mair_el2;
uintreg_t tcr_el2;
uintreg_t sctlr_el2;
- uintreg_t vstcr_el2;
uintreg_t hcr_el2;
+ uintreg_t vtcr_el2;
+ uintreg_t vstcr_el2;
} arch_mm_config;
static uint8_t mm_s1_max_level;
@@ -959,3 +959,13 @@
uint64_t features = read_msr(id_aa64mmfr0_el1);
return pa_bits_table[features & 0xf];
}
+
+uintptr_t arch_mm_get_vtcr_el2(void)
+{
+ return arch_mm_config.vtcr_el2;
+}
+
+uintptr_t arch_mm_get_vstcr_el2(void)
+{
+ return arch_mm_config.vstcr_el2;
+}