// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

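/*
 * Host context stashed across a guest TLB operation: the host's IRQ
 * flags, plus the TCR_EL1/SCTLR_EL1 values saved by the erratum 1165522
 * workaround on VHE.
 */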
struct tlb_inv_context {
        unsigned long flags;
        u64 tcr;
        u64 sctlr;
};

static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
{
        u64 val;

        local_irq_save(cxt->flags);

        if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
                /*
                 * For CPUs that are affected by ARM erratum 1165522, we
                 * cannot trust stage-1 to be in a correct state at this
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker from
                 * allocating new TLB entries. This is done by setting the
                 * EPD bits in the TCR_EL1 register. We also need to
                 * prevent it from allocating IPA->PA walks, so we enable
                 * the S1 MMU...
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
                val |= SCTLR_ELx_M;
                write_sysreg_el1(val, SYS_SCTLR);
        }

        /*
         * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
         * most TLB operations target EL2/EL0. In order to affect the
         * guest TLBs (EL1/EL0), we need to change one of these two
         * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
         * let's flip TGE before executing the TLB operation.
         *
         * ARM erratum 1165522 requires some special handling (again),
         * as we need to make sure both stages of translation are in
         * place before clearing TGE. __load_guest_stage2() already
         * has an ISB in order to deal with this.
         */
        __load_guest_stage2(kvm);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);
        isb();
}

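/*
 * On nVHE, TLB instructions executed at EL2 already target the EL1&0
 * translation regime, so switching to the guest only requires loading
 * the guest's VTTBR_EL2 (stage-2 base and VMID).
 */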
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
{
        __load_guest_stage2(kvm);
        isb();
}

static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
                                             struct tlb_inv_context *cxt)
{
        if (has_vhe())
                __tlb_switch_to_guest_vhe(kvm, cxt);
        else
                __tlb_switch_to_guest_nvhe(kvm, cxt);
}

static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
                                                struct tlb_inv_context *cxt)
{
        /*
         * We're done with the TLB operation, let's restore the host's
         * view of HCR_EL2.
         */
        write_sysreg(0, vttbr_el2);
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();

        if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
        }

        local_irq_restore(cxt->flags);
}

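/*
 * The host runs with VMID 0; restoring a zero VTTBR_EL2 is enough to
 * make TLB matching use the host's entries again.
 */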
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
{
        write_sysreg(0, vttbr_el2);
}

static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
                                            struct tlb_inv_context *cxt)
{
        if (has_vhe())
                __tlb_switch_to_host_vhe(kvm, cxt);
        else
                __tlb_switch_to_host_nvhe(kvm, cxt);
}

void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        struct tlb_inv_context cxt;

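        /* Make prior stage-2 page-table updates visible to all CPUs. */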
        dsb(ishst);

        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
        __tlb_switch_to_guest(kvm, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
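        /* TLBI IPAS2E1IS takes the IPA shifted right by 12 bits. */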
        ipa >>= 12;
        __tlbi(ipas2e1is, ipa);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        /*
         * If the host is running at EL1 and we have a VPIPT I-cache,
         * then we must perform I-cache maintenance at EL2 in order for
         * it to have an effect on the guest. Since the guest cannot hit
         * I-cache lines allocated with a different VMID, we don't need
         * to worry about junk out of guest reset (we nuke the I-cache on
         * VMID rollover), but we do need to be careful when remapping
         * executable pages for the same guest. This can happen when KSM
         * takes a CoW fault on an executable page, copies the page into
         * a page that was previously mapped in the guest and then needs
         * to invalidate the guest view of the I-cache for that page
         * from EL1. To solve this, we invalidate the entire I-cache when
         * unmapping a page from a guest if we have a VPIPT I-cache but
         * the host is running at EL1. As above, we could do better if
         * we had the VA.
         *
         * The moral of this story is: if you have a VPIPT I-cache, then
         * you should be running with VHE enabled.
         */
        if (!has_vhe() && icache_is_vpipt())
                __flush_icache_all();

        __tlb_switch_to_host(kvm, &cxt);
}

void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
        __tlb_switch_to_guest(kvm, &cxt);

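        /* Invalidate all stage-1 and stage-2 entries for this VMID. */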
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}

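/*
 * Flush this CPU's TLB entries for the vcpu's VMID, plus its local
 * I-cache. The non-shareable (nsh) barrier scopes the operation to the
 * local CPU; this avoids stale local TLB/I-cache state when vcpus of
 * the same VM move between physical CPUs.
 */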
void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        __tlb_switch_to_guest(kvm, &cxt);

        __tlbi(vmalle1);
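        /*
         * Also invalidate this CPU's entire I-cache; with a VPIPT
         * I-cache this removes any lines tagged with this VMID.
         */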
        asm volatile("ic iallu");
        dsb(nsh);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}

void __hyp_text __kvm_flush_vm_context(void)
{
        dsb(ishst);
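        /* Invalidate all EL1&0 TLB entries, for every VMID, Inner Shareable. */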
        __tlbi(alle1is);

        /*
         * VIPT and PIPT caches are not affected by VMID, so no maintenance
         * is necessary across a VMID rollover.
         *
         * VPIPT caches constrain lookup and maintenance to the active VMID,
         * so we need to invalidate lines with a stale VMID to avoid an ABA
         * race after multiple rollovers.
         */
        if (icache_is_vpipt())
                asm volatile("ic ialluis");

        dsb(ish);
}