Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 2fabc2d..ea710f6 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,6 +10,7 @@
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
@@ -19,7 +20,6 @@
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
-obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 5000976..0fc9872 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
@@ -29,40 +18,70 @@
#define save_debug(ptr,reg,nr) \
switch (nr) { \
case 15: ptr[15] = read_debug(reg, 15); \
+ /* Fall through */ \
case 14: ptr[14] = read_debug(reg, 14); \
+ /* Fall through */ \
case 13: ptr[13] = read_debug(reg, 13); \
+ /* Fall through */ \
case 12: ptr[12] = read_debug(reg, 12); \
+ /* Fall through */ \
case 11: ptr[11] = read_debug(reg, 11); \
+ /* Fall through */ \
case 10: ptr[10] = read_debug(reg, 10); \
+ /* Fall through */ \
case 9: ptr[9] = read_debug(reg, 9); \
+ /* Fall through */ \
case 8: ptr[8] = read_debug(reg, 8); \
+ /* Fall through */ \
case 7: ptr[7] = read_debug(reg, 7); \
+ /* Fall through */ \
case 6: ptr[6] = read_debug(reg, 6); \
+ /* Fall through */ \
case 5: ptr[5] = read_debug(reg, 5); \
+ /* Fall through */ \
case 4: ptr[4] = read_debug(reg, 4); \
+ /* Fall through */ \
case 3: ptr[3] = read_debug(reg, 3); \
+ /* Fall through */ \
case 2: ptr[2] = read_debug(reg, 2); \
+ /* Fall through */ \
case 1: ptr[1] = read_debug(reg, 1); \
+ /* Fall through */ \
default: ptr[0] = read_debug(reg, 0); \
}
#define restore_debug(ptr,reg,nr) \
switch (nr) { \
case 15: write_debug(ptr[15], reg, 15); \
+ /* Fall through */ \
case 14: write_debug(ptr[14], reg, 14); \
+ /* Fall through */ \
case 13: write_debug(ptr[13], reg, 13); \
+ /* Fall through */ \
case 12: write_debug(ptr[12], reg, 12); \
+ /* Fall through */ \
case 11: write_debug(ptr[11], reg, 11); \
+ /* Fall through */ \
case 10: write_debug(ptr[10], reg, 10); \
+ /* Fall through */ \
case 9: write_debug(ptr[9], reg, 9); \
+ /* Fall through */ \
case 8: write_debug(ptr[8], reg, 8); \
+ /* Fall through */ \
case 7: write_debug(ptr[7], reg, 7); \
+ /* Fall through */ \
case 6: write_debug(ptr[6], reg, 6); \
+ /* Fall through */ \
case 5: write_debug(ptr[5], reg, 5); \
+ /* Fall through */ \
case 4: write_debug(ptr[4], reg, 4); \
+ /* Fall through */ \
case 3: write_debug(ptr[3], reg, 3); \
+ /* Fall through */ \
case 2: write_debug(ptr[2], reg, 2); \
+ /* Fall through */ \
case 1: write_debug(ptr[1], reg, 1); \
+ /* Fall through */ \
default: write_debug(ptr[0], reg, 0); \
}
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fad1e16..e5cc8d6 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -1,22 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
@@ -24,6 +14,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
#define CPU_GP_REG_OFFSET(x) (CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -62,8 +53,29 @@
// Store the host regs
save_callee_saved_regs x1
+ // Now the host state is stored. If we have a pending RAS SError it
+ // must affect the host. If any asynchronous exception is pending we
+ // defer the guest entry. The DSB isn't necessary before v8.2 as any
+ // SError would be fatal.
+alternative_if ARM64_HAS_RAS_EXTN
+ dsb nshst
+ isb
+alternative_else_nop_endif
+ mrs x1, isr_el1
+ cbz x1, 1f
+ mov x0, #ARM_EXCEPTION_IRQ
+ ret
+
+1:
add x18, x0, #VCPU_CONTEXT
+ // Macro ptrauth_switch_to_guest format:
+ // ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
+ // The below macro to restore guest keys is not implemented in C code
+ // as it may cause Pointer Authentication key signing mismatch errors
+ // when this feature is enabled for kernel code.
+ ptrauth_switch_to_guest x18, x0, x1, x2
+
// Restore guest regs x0-x17
ldp x0, x1, [x18, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x18, #CPU_XREG_OFFSET(2)]
@@ -83,6 +95,7 @@
// Do not touch any register after this!
eret
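+ // Speculation barrier: prevent straight-line speculation past the ERET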
+ sb
ENDPROC(__guest_enter)
ENTRY(__guest_exit)
@@ -117,13 +130,20 @@
get_host_ctxt x2, x3
+ // Macro ptrauth_switch_to_host format:
+ // ptrauth_switch_to_host(guest cxt, host cxt, tmp1, tmp2, tmp3)
+ // The below macro to save/restore keys is not implemented in C code
+ // as it may cause Pointer Authentication key signing mismatch errors
+ // when this feature is enabled for kernel code.
+ ptrauth_switch_to_host x1, x2, x3, x4, x5
+
// Now restore the host regs
restore_callee_saved_regs x2
alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
- // without an unmask-SError and isb.
- esb
+ // without an unmask-SError and isb. The ESB instruction consumed any
+ // pending guest error when we took the exception from the guest.
mrs_s x2, SYS_DISR_EL1
str x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
cbz x2, 1f
@@ -131,8 +151,16 @@
orr x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1: ret
alternative_else
- // If we have a pending asynchronous abort, now is the
- // time to find out. From your VAXorcist book, page 666:
+ dsb sy // Synchronize against in-flight ld/st
+ isb // Prevent an early read of side-effect free ISR
+ mrs x2, isr_el1
+ tbnz x2, #8, 2f // ISR_EL1.A
+ ret
+ nop
+2:
+alternative_endif
+ // We know we have a pending asynchronous abort; now is the
+ // time to flush it out. From your VAXorcist book, page 666:
// "Threaten me not, oh Evil one! For I speak with
// the power of DEC, and I command thee to show thyself!"
mrs x2, elr_el2
@@ -140,10 +168,7 @@
mrs x4, spsr_el2
mov x5, x0
- dsb sy // Synchronize against in-flight ld/st
- nop
msr daifclr, #4 // Unmask aborts
-alternative_endif
// This is our single instruction exception window. A pending
// SError is guaranteed to occur at the earliest when we unmask
@@ -156,6 +181,8 @@
.global abort_guest_exit_end
abort_guest_exit_end:
+ msr daifset, #4 // Mask aborts
+
// If the exception took place, restore the EL1 exception
// context so that we can report some information.
// Merge the exception code with the SError pending bit.
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index da3f22c..78ff532 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fba..ffa68d5 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -1,18 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2015-2018 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm-smccc.h>
@@ -43,18 +32,6 @@
ldr lr, [sp], #16
.endm
-ENTRY(__vhe_hyp_call)
- do_el2_call
- /*
- * We used to rely on having an exception return to get
- * an implicit isb. In the E2H case, we don't have it anymore.
- * rather than changing all the leaf functions, just do it here
- * before returning to the rest of the kernel.
- */
- isb
- ret
-ENDPROC(__vhe_hyp_call)
-
el1_sync: // Guest trapped into EL2
mrs x0, esr_el2
@@ -96,6 +73,7 @@
do_el2_call
eret
+ sb
el1_hvc_guest:
/*
@@ -146,6 +124,7 @@
mov x0, xzr
add sp, sp, #16
eret
+ sb
el1_trap:
get_vcpu_ptr x1, x0
@@ -162,6 +141,20 @@
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
+el2_sync:
+ /* Check for illegal exception return, otherwise panic */
+ mrs x0, spsr_el2
+
+ /* if this was something else, then panic! */
+ tst x0, #PSR_IL_BIT
+ b.eq __hyp_panic
+
+ /* Let's attempt a recovery from the illegal exception return */
+ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_IL
+ b __guest_exit
+
el2_error:
ldp x0, x1, [sp], #16
@@ -185,6 +178,7 @@
b.ne __hyp_panic
mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
eret
+ sb
ENTRY(__hyp_do_panic)
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -193,6 +187,7 @@
ldr lr, =panic
msr elr_el2, lr
eret
+ sb
ENDPROC(__hyp_do_panic)
ENTRY(__hyp_panic)
@@ -221,17 +216,34 @@
.align 11
+.macro check_preamble_length start, end
+/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
+.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
+ .error "KVM vector preamble length mismatch"
+.endif
+.endm
+
.macro valid_vect target
.align 7
+661:
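+ // Vector preamble: consume a pending SError as early as possible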
+ esb
stp x0, x1, [sp, #-16]!
+662:
b \target
+
+check_preamble_length 661b, 662b
.endm
.macro invalid_vect target
.align 7
+661:
b \target
+ nop
+662:
ldp x0, x1, [sp], #16
b \target
+
+check_preamble_length 661b, 662b
.endm
ENTRY(__kvm_hyp_vector)
@@ -240,7 +252,7 @@
invalid_vect el2t_fiq_invalid // FIQ EL2t
invalid_vect el2t_error_invalid // Error EL2t
- invalid_vect el2h_sync_invalid // Synchronous EL2h
+ valid_vect el2_sync // Synchronous EL2h
invalid_vect el2h_irq_invalid // IRQ EL2h
invalid_vect el2h_fiq_invalid // FIQ EL2h
valid_vect el2_error // Error EL2h
@@ -259,13 +271,14 @@
#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
.align 7
-1: .rept 27
+1: esb
+ .rept 26
nop
.endr
/*
* The default sequence is to directly branch to the KVM vectors,
* using the computed offset. This applies for VHE as well as
- * !ARM64_HARDEN_EL2_VECTORS.
+ * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
*
* For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
* with:
@@ -276,12 +289,13 @@
* movk x0, #((addr >> 32) & 0xffff), lsl #32
* br x0
*
- * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
+ * Where:
+ * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
* See kvm_patch_vector_branch for details.
*/
alternative_cb kvm_patch_vector_branch
- b __kvm_hyp_vector + (1b - 0b)
- nop
+ stp x0, x1, [sp, #-16]!
+ b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
nop
nop
nop
@@ -306,6 +320,7 @@
.popsection
ENTRY(__smccc_workaround_1_smc_start)
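+ // This sequence is copied into the hardened vector slots, so it must
+ // also begin with the ESB of the vector preamble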
+ esb
sub sp, sp, #(8 * 4)
stp x2, x3, [sp, #(8 * 0)]
stp x0, x1, [sp, #(8 * 2)]
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
deleted file mode 100644
index 603e1ee..0000000
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/types.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_hyp.h>
-
-u32 __hyp_text __init_stage2_translation(void)
-{
- u64 val = VTCR_EL2_FLAGS;
- u64 parange;
- u64 tmp;
-
- /*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
- * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
- * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
- */
- parange = read_sysreg(id_aa64mmfr0_el1) & 7;
- if (parange > ID_AA64MMFR0_PARANGE_MAX)
- parange = ID_AA64MMFR0_PARANGE_MAX;
- val |= parange << 16;
-
- /* Compute the actual PARange... */
- switch (parange) {
- case 0:
- parange = 32;
- break;
- case 1:
- parange = 36;
- break;
- case 2:
- parange = 40;
- break;
- case 3:
- parange = 42;
- break;
- case 4:
- parange = 44;
- break;
- case 5:
- default:
- parange = 48;
- break;
- }
-
- /*
- * ... and clamp it to 40 bits, unless we have some braindead
- * HW that implements less than that. In all cases, we'll
- * return that value for the rest of the kernel to decide what
- * to do.
- */
- val |= 64 - (parange > 40 ? 40 : parange);
-
- /*
- * Check the availability of Hardware Access Flag / Dirty Bit
- * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2.
- */
- tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf;
- if (tmp)
- val |= VTCR_EL2_HA;
-
- /*
- * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
- * bit in VTCR_EL2.
- */
- tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
- val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
- VTCR_EL2_VS_16BIT :
- VTCR_EL2_VS_8BIT;
-
- write_sysreg(val, vtcr_el2);
-
- return parange;
-}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca46153..799e84a 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -1,28 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
#include <kvm/arm_psci.h>
+#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
+#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
@@ -98,7 +90,10 @@
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
- if (!update_fp_enabled(vcpu)) {
+ if (update_fp_enabled(vcpu)) {
+ if (vcpu_has_sve(vcpu))
+ val |= CPACR_EL1_ZEN;
+ } else {
val &= ~CPACR_EL1_FPEN;
__activate_traps_fpsimd32(vcpu);
}
@@ -107,6 +102,7 @@
write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
+NOKPROBE_SYMBOL(activate_traps_vhe);
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
@@ -128,6 +124,9 @@
{
u64 hcr = vcpu->arch.hcr_el2;
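+ /* Cavium ThunderX2 erratum 219 workaround: trap guest VM-register writes */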
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+ hcr |= HCR_TVM;
+
write_sysreg(hcr, hcr_el2);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -143,9 +142,18 @@
{
extern char vectors[]; /* kernel exception vectors */
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+
+ /*
+ * ARM erratum 1165522 requires the actual execution of the above
+ * before we can switch to the EL2/EL0 translation regime used by
+ * the host.
+ */
+ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));
+
write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
write_sysreg(vectors, vbar_el1);
}
+NOKPROBE_SYMBOL(deactivate_traps_vhe);
static void __hyp_text __deactivate_traps_nvhe(void)
{
@@ -157,7 +165,7 @@
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
write_sysreg(mdcr_el2, mdcr_el2);
- write_sysreg(HCR_RW, hcr_el2);
+ write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}
@@ -169,8 +177,10 @@
* the crucial bit is "On taking a vSError interrupt,
* HCR_EL2.VSE is cleared to 0."
*/
- if (vcpu->arch.hcr_el2 & HCR_VSE)
- vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
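+ /*
+ * Resync only the VSE bit: other HCR_EL2 bits may have been set
+ * behind the guest's back (e.g. TVM for the TX2 erratum) and must
+ * not be folded into the vcpu's copy.
+ */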
+ if (vcpu->arch.hcr_el2 & HCR_VSE) {
+ vcpu->arch.hcr_el2 &= ~HCR_VSE;
+ vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+ }
if (has_vhe())
deactivate_traps_vhe();
@@ -198,7 +208,7 @@
static void __hyp_text __activate_vm(struct kvm *kvm)
{
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
@@ -224,20 +234,6 @@
}
}
-static bool __hyp_text __true_value(void)
-{
- return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
- return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
- __false_value, __true_value,
- ARM64_WORKAROUND_834220);
-
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
u64 par, tmp;
@@ -259,11 +255,11 @@
tmp = read_sysreg(par_el1);
write_sysreg(par, par_el1);
- if (unlikely(tmp & 1))
+ if (unlikely(tmp & SYS_PAR_EL1_F))
return false; /* Translation failed, back to guest */
/* Convert PAR to HPFAR format */
- *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+ *hpfar = PAR_TO_HPFAR(tmp);
return true;
}
@@ -279,7 +275,7 @@
if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
return true;
- far = read_sysreg_el2(far);
+ far = read_sysreg_el2(SYS_FAR);
/*
* The HPFAR can be invalid if the stage 2 fault did not
@@ -293,7 +289,8 @@
* resolve the IPA using the AT instruction.
*/
if (!(esr & ESR_ELx_S1PTW) &&
- (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+ (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+ (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
if (!__translate_far_to_hpfar(far, &hpfar))
return false;
} else {
@@ -305,43 +302,48 @@
return true;
}
-/* Skip an instruction which has been emulated. Returns true if
- * execution can continue or false if we need to exit hyp mode because
- * single-step was in effect.
- */
-static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Check for an FPSIMD/SVE trap and handle as appropriate */
+static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
- *vcpu_pc(vcpu) = read_sysreg_el2(elr);
+ bool vhe, sve_guest, sve_host;
+ u8 hsr_ec;
- if (vcpu_mode_is_32bit(vcpu)) {
- vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
- kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
- write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
- } else {
- *vcpu_pc(vcpu) += 4;
- }
-
- write_sysreg_el2(*vcpu_pc(vcpu), elr);
-
- if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
- vcpu->arch.fault.esr_el2 =
- (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+ if (!system_supports_fpsimd())
return false;
+
+ if (system_supports_sve()) {
+ sve_guest = vcpu_has_sve(vcpu);
+ sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
+ vhe = true;
} else {
- return true;
+ sve_guest = false;
+ sve_host = false;
+ vhe = has_vhe();
}
-}
-static bool __hyp_text __hyp_switch_fpsimd(struct kvm_vcpu *vcpu)
-{
- struct user_fpsimd_state *host_fpsimd = vcpu->arch.host_fpsimd_state;
+ hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+ if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
+ hsr_ec != ESR_ELx_EC_SVE)
+ return false;
- if (has_vhe())
- write_sysreg(read_sysreg(cpacr_el1) | CPACR_EL1_FPEN,
- cpacr_el1);
- else
+ /* Don't handle SVE traps for non-SVE vcpus here: */
+ if (!sve_guest)
+ if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+ return false;
+
+ /* Valid trap. Switch the context: */
+
+ if (vhe) {
+ u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
+
+ if (sve_guest)
+ reg |= CPACR_EL1_ZEN;
+
+ write_sysreg(reg, cpacr_el1);
+ } else {
write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
cptr_el2);
+ }
isb();
@@ -350,21 +352,28 @@
* In the SVE case, VHE is assumed: it is enforced by
* Kconfig and kvm_arch_init().
*/
- if (system_supports_sve() &&
- (vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE)) {
+ if (sve_host) {
struct thread_struct *thread = container_of(
- host_fpsimd,
+ vcpu->arch.host_fpsimd_state,
struct thread_struct, uw.fpsimd_state);
- sve_save_state(sve_pffr(thread), &host_fpsimd->fpsr);
+ sve_save_state(sve_pffr(thread),
+ &vcpu->arch.host_fpsimd_state->fpsr);
} else {
- __fpsimd_save_state(host_fpsimd);
+ __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
}
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
- __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ if (sve_guest) {
+ sve_load_state(vcpu_sve_pffr(vcpu),
+ &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
+ sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
+ write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
+ } else {
+ __fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
+ }
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
@@ -376,6 +385,61 @@
return true;
}
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+ u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
+ u64 val = vcpu_get_reg(vcpu, rt);
+
+ /*
+ * The normal sysreg handling code expects to see the traps,
+ * let's not do anything here.
+ */
+ if (vcpu->arch.hcr_el2 & HCR_TVM)
+ return false;
+
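+ /* Replay the trapped write on the corresponding EL1 register */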
+ switch (sysreg) {
+ case SYS_SCTLR_EL1:
+ write_sysreg_el1(val, SYS_SCTLR);
+ break;
+ case SYS_TTBR0_EL1:
+ write_sysreg_el1(val, SYS_TTBR0);
+ break;
+ case SYS_TTBR1_EL1:
+ write_sysreg_el1(val, SYS_TTBR1);
+ break;
+ case SYS_TCR_EL1:
+ write_sysreg_el1(val, SYS_TCR);
+ break;
+ case SYS_ESR_EL1:
+ write_sysreg_el1(val, SYS_ESR);
+ break;
+ case SYS_FAR_EL1:
+ write_sysreg_el1(val, SYS_FAR);
+ break;
+ case SYS_AFSR0_EL1:
+ write_sysreg_el1(val, SYS_AFSR0);
+ break;
+ case SYS_AFSR1_EL1:
+ write_sysreg_el1(val, SYS_AFSR1);
+ break;
+ case SYS_MAIR_EL1:
+ write_sysreg_el1(val, SYS_MAIR);
+ break;
+ case SYS_AMAIR_EL1:
+ write_sysreg_el1(val, SYS_AMAIR);
+ break;
+ case SYS_CONTEXTIDR_EL1:
+ write_sysreg_el1(val, SYS_CONTEXTIDR);
+ break;
+ default:
+ return false;
+ }
+
+ __kvm_skip_instr(vcpu);
+ return true;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -384,7 +448,7 @@
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
- vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
+ vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
/*
* We're using the raw exception code in order to only process
@@ -395,15 +459,20 @@
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
+ if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+ handle_tx2_tvm(vcpu))
+ return true;
+
/*
* We trap the first access to the FP/SIMD to save the host context
* and restore the guest context lazily.
* If FP/SIMD is not implemented, handle the trap and inject an
* undefined instruction exception to the guest.
+ * Similarly for trapped SVE accesses.
*/
- if (system_supports_fpsimd() &&
- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_FP_ASIMD)
- return __hyp_switch_fpsimd(vcpu);
+ if (__hyp_handle_fpsimd(vcpu))
+ return true;
if (!__populate_fault_info(vcpu))
return true;
@@ -420,20 +489,12 @@
if (valid) {
int ret = __vgic_v2_perform_cpuif_access(vcpu);
- if (ret == 1 && __skip_instr(vcpu))
+ if (ret == 1)
return true;
- if (ret == -1) {
- /* Promote an illegal access to an
- * SError. If we would be returning
- * due to single-step clear the SS
- * bit so handle_exit knows what to
- * do after dealing with the error.
- */
- if (!__skip_instr(vcpu))
- *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
+ /* Promote an illegal access to an SError. */
+ if (ret == -1)
*exit_code = ARM_EXCEPTION_EL1_SERROR;
- }
goto exit;
}
@@ -444,7 +505,7 @@
kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
int ret = __vgic_v3_perform_cpuif_access(vcpu);
- if (ret == 1 && __skip_instr(vcpu))
+ if (ret == 1)
return true;
}
@@ -486,6 +547,44 @@
#endif
}
+/*
+ * Disable host events, enable guest events
+ */
+static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenclr_el0);
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenset_el0);
+
+ return (pmu->events_host || pmu->events_guest);
+}
+
+/*
+ * Disable guest events, enable host events
+ */
+static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenclr_el0);
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenset_el0);
+}
+
/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
@@ -499,8 +598,19 @@
sysreg_save_host_state_vhe(host_ctxt);
- __activate_traps(vcpu);
+ /*
+ * ARM erratum 1165522 requires us to configure both stage 1 and
+ * stage 2 translation for the guest context before we clear
+ * HCR_EL2.TGE.
+ *
+ * We have already configured the guest's stage 1 translation in
+ * kvm_vcpu_load_sysregs above. We must now call __activate_vm
+ * before __activate_traps, because __activate_vm configures
+ * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
+ * (among other things).
+ */
__activate_vm(vcpu->kvm);
+ __activate_traps(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
@@ -529,24 +639,39 @@
return exit_code;
}
+NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
+ bool pmu_switch_needed;
u64 exit_code;
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal lower-priority interrupts to the CPU, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ */
+ if (system_uses_irq_prio_masking()) {
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+ dsb(sy);
+ }
+
vcpu = kern_hyp_va(vcpu);
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
+ pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
+
__sysreg_save_state_nvhe(host_ctxt);
- __activate_traps(vcpu);
__activate_vm(kern_hyp_va(vcpu->kvm));
+ __activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
__timer_enable_traps(vcpu);
@@ -589,6 +714,13 @@
*/
__debug_switch_to_host(vcpu);
+ if (pmu_switch_needed)
+ __pmu_switch_to_host(host_ctxt);
+
+ /* Returning to host will clear PSR.I, remask PMR if needed */
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(GIC_PRIO_IRQOFF);
+
return exit_code;
}
@@ -617,8 +749,8 @@
asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
__hyp_do_panic(str_va,
- spsr, elr,
- read_sysreg(esr_el2), read_sysreg_el2(far),
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
}
@@ -633,14 +765,15 @@
panic(__hyp_panic_string,
spsr, elr,
- read_sysreg_el2(esr), read_sysreg_el2(far),
+ read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
}
+NOKPROBE_SYMBOL(__hyp_call_panic_vhe);
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
- u64 spsr = read_sysreg_el2(spsr);
- u64 elr = read_sysreg_el2(elr);
+ u64 spsr = read_sysreg_el2(SYS_SPSR);
+ u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg(par_el1);
if (!has_vhe())
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 9ce2239..7ddbc84 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -1,23 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
#include <linux/kvm_host.h>
+#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
@@ -52,35 +42,34 @@
static void __hyp_text __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
- ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
- ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
+ ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(SYS_SCTLR);
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
- ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr);
- ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0);
- ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1);
- ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr);
- ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr);
- ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0);
- ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1);
- ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far);
- ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair);
- ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar);
- ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr);
- ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
- ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
+ ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(SYS_CPACR);
+ ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(SYS_TTBR0);
+ ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(SYS_TTBR1);
+ ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(SYS_TCR);
+ ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(SYS_ESR);
+ ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(SYS_AFSR0);
+ ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(SYS_AFSR1);
+ ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(SYS_FAR);
+ ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(SYS_MAIR);
+ ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(SYS_VBAR);
+ ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(SYS_CONTEXTIDR);
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(SYS_AMAIR);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(SYS_CNTKCTL);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
- ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
- ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
+ ctxt->gp_regs.elr_el1 = read_sysreg_el1(SYS_ELR);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(SYS_SPSR);
}
static void __hyp_text __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
- ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
- ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+ ctxt->gp_regs.regs.pc = read_sysreg_el2(SYS_ELR);
+ ctxt->gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
ctxt->sys_regs[DISR_EL1] = read_sysreg_s(SYS_VDISR_EL2);
@@ -98,12 +87,14 @@
{
__sysreg_save_common_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_save_host_state_vhe);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_save_common_state(ctxt);
__sysreg_save_el2_return_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe);
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
@@ -118,42 +109,59 @@
static void __hyp_text __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
- write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
- write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+ write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
+ write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
}
static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
- write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
- write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
- write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
- write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
- write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
- write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr);
- write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr);
- write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
- write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
- write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far);
- write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair);
- write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar);
- write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr);
- write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
- write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
+ write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], SYS_SCTLR);
+ write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
+ write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], SYS_CPACR);
+ write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], SYS_TTBR0);
+ write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], SYS_TTBR1);
+ write_sysreg_el1(ctxt->sys_regs[TCR_EL1], SYS_TCR);
+ write_sysreg_el1(ctxt->sys_regs[ESR_EL1], SYS_ESR);
+ write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], SYS_AFSR0);
+ write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], SYS_AFSR1);
+ write_sysreg_el1(ctxt->sys_regs[FAR_EL1], SYS_FAR);
+ write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], SYS_MAIR);
+ write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], SYS_VBAR);
+ write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],SYS_CONTEXTIDR);
+ write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], SYS_AMAIR);
+ write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], SYS_CNTKCTL);
write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
- write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
- write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+ write_sysreg_el1(ctxt->gp_regs.elr_el1, SYS_ELR);
+ write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],SYS_SPSR);
}
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
- write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ u64 pstate = ctxt->gp_regs.regs.pstate;
+ u64 mode = pstate & PSR_AA32_MODE_MASK;
+
+ /*
+ * Safety check to ensure we're setting the CPU up to enter the guest
+ * in a less privileged mode.
+ *
+ * If we are attempting a return to EL2 or higher in AArch64 state,
+ * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
+ * we'll take an illegal exception state exception immediately after
+ * the ERET to the guest. Attempts to return to AArch32 Hyp will
+ * result in an illegal exception return because EL2's execution state
+ * is determined by SCR_EL3.RW.
+ */
+ if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
+ pstate = PSR_MODE_EL2h | PSR_IL_BIT;
+
+ write_sysreg_el2(ctxt->gp_regs.regs.pc, SYS_ELR);
+ write_sysreg_el2(pstate, SYS_SPSR);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
@@ -171,12 +179,14 @@
{
__sysreg_restore_common_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
{
__sysreg_restore_common_state(ctxt);
__sysreg_restore_el2_return_state(ctxt);
}
+NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
@@ -288,3 +298,14 @@
vcpu->arch.sysregs_loaded_on_cpu = false;
}
+
+void __hyp_text __kvm_enable_ssbs(void)
+{
+ u64 tmp;
+
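+ /* Set SCTLR_EL2.DSSBS so PSTATE.SSBS defaults to 1 on entry to EL2 */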
+ asm volatile(
+ "mrs %0, sctlr_el2\n"
+ "orr %0, %0, %1\n"
+ "msr sctlr_el2, %0"
+ : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
+}
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 131c777..eb0efc5 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -1,54 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqflags.h>
+
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
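+/* Host state saved/restored around a guest TLB invalidation */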
+struct tlb_inv_context {
+ unsigned long flags;
+ u64 tcr;
+ u64 sctlr;
+};
+
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
u64 val;
+ local_irq_save(cxt->flags);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ /*
+ * For CPUs that are affected by ARM erratum 1165522, we
+ * cannot trust stage-1 to be in a correct state at that
+ * point. Since we do not want to force a full load of the
+ * vcpu state, we prevent the EL1 page-table walker from
+ * allocating new TLBs. This is done by setting the EPD bits
+ * in the TCR_EL1 register. We also need to prevent it from
+ * allocating IPA->PA walks, so we enable the S1 MMU...
+ */
+ val = cxt->tcr = read_sysreg_el1(SYS_TCR);
+ val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+ write_sysreg_el1(val, SYS_TCR);
+ val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+ val |= SCTLR_ELx_M;
+ write_sysreg_el1(val, SYS_SCTLR);
+ }
+
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
* guest TLBs (EL1/EL0), we need to change one of these two
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
+ *
+ * ARM erratum 1165522 requires some special handling (again),
+ * as we need to make sure both stages of translation are in
+ * place before clearing TGE. __load_guest_stage2() already
+ * has an ISB in order to deal with this.
*/
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);
isb();
}
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
isb();
}
-static hyp_alternate_select(__tlb_switch_to_guest,
- __tlb_switch_to_guest_nvhe,
- __tlb_switch_to_guest_vhe,
- ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
+{
+ if (has_vhe())
+ __tlb_switch_to_guest_vhe(kvm, cxt);
+ else
+ __tlb_switch_to_guest_nvhe(kvm, cxt);
+}
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
/*
* We're done with the TLB operation, let's restore the host's
@@ -56,25 +85,41 @@
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ isb();
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
+ /* Restore the registers to what they were */
+ write_sysreg_el1(cxt->tcr, SYS_TCR);
+ write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+ }
+
+ local_irq_restore(cxt->flags);
}
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
{
write_sysreg(0, vttbr_el2);
}
-static hyp_alternate_select(__tlb_switch_to_host,
- __tlb_switch_to_host_nvhe,
- __tlb_switch_to_host_vhe,
- ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+ struct tlb_inv_context *cxt)
+{
+ if (has_vhe())
+ __tlb_switch_to_host_vhe(kvm, cxt);
+ else
+ __tlb_switch_to_host_nvhe(kvm, cxt);
+}
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
+ struct tlb_inv_context cxt;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest(kvm, &cxt);
/*
* We could do so much better if we had the VA as well.
@@ -117,42 +162,57 @@
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
+ struct tlb_inv_context cxt;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+ struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest(kvm, &cxt);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host(kvm, &cxt);
}
void __hyp_text __kvm_flush_vm_context(void)
{
dsb(ishst);
__tlbi(alle1is);
- asm volatile("ic ialluis" : : );
+
+ /*
+ * VIPT and PIPT caches are not affected by VMID, so no maintenance
+ * is necessary across a VMID rollover.
+ *
+ * VPIPT caches constrain lookup and maintenance to the active VMID,
+ * so we need to invalidate lines with a stale VMID to avoid an ABA
+ * race after multiple rollovers.
+ */
+ if (icache_is_vpipt())
+ asm volatile("ic ialluis");
+
dsb(ish);
}
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 215c7c0..29ee1fe 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2015 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/compiler.h>
@@ -27,7 +16,7 @@
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
- return !!(read_sysreg_el2(spsr) & PSR_AA32_E_BIT);
+ return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);
return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}
@@ -41,7 +30,7 @@
* Returns:
* 1: GICV access successfully performed
* 0: Not a GICV access
- * -1: Illegal GICV access
+ * -1: Illegal GICV access successfully performed
*/
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
@@ -61,12 +50,16 @@
return 0;
/* Reject anything but a 32bit access */
- if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
+ if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
+ __kvm_skip_instr(vcpu);
return -1;
+ }
/* Not aligned? Don't bother */
- if (fault_ipa & 3)
+ if (fault_ipa & 3) {
+ __kvm_skip_instr(vcpu);
return -1;
+ }
rd = kvm_vcpu_dabt_get_rd(vcpu);
addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
@@ -88,5 +81,7 @@
vcpu_set_reg(vcpu, rd, data);
}
+ __kvm_skip_instr(vcpu);
+
return 1;
}