// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

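/*
 * Context saved around a guest TLB operation: the caller's IRQ flags,
 * plus the EL1 TCR and SCTLR values that the ARM erratum 1165522
 * workaround temporarily rewrites.
 */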
struct tlb_inv_context {
        unsigned long flags;
        u64 tcr;
        u64 sctlr;
};

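/*
 * Switch the EL1&0 translation regime over to the guest, flipping
 * HCR_EL2.TGE so that the TLB maintenance that follows targets the
 * guest's TLB entries rather than the host's.
 */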
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
{
        u64 val;

        local_irq_save(cxt->flags);

        if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
                /*
                 * For CPUs that are affected by ARM erratum 1165522, we
                 * cannot trust stage-1 to be in a correct state at this
                 * point. Since we do not want to force a full load of the
                 * vcpu state, we prevent the EL1 page-table walker from
                 * allocating new TLB entries by setting the EPD bits in
                 * TCR_EL1. We also need to prevent it from performing
                 * IPA->PA walks, so we enable the S1 MMU...
                 */
                val = cxt->tcr = read_sysreg_el1(SYS_TCR);
                val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
                write_sysreg_el1(val, SYS_TCR);
                val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
                val |= SCTLR_ELx_M;
                write_sysreg_el1(val, SYS_SCTLR);
        }

        /*
         * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
         * most TLB operations target EL2/EL0. In order to affect the
         * guest TLBs (EL1/EL0), we need to change one of these two
         * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
         * let's flip TGE before executing the TLB operation.
         *
         * ARM erratum 1165522 requires some special handling (again),
         * as we need to make sure both stages of translation are in
         * place before clearing TGE. __load_guest_stage2() already
         * has an ISB in order to deal with this.
         */
        __load_guest_stage2(kvm);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);
        isb();
}

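/*
 * On nVHE, the stage-2 TLB maintenance instructions executed at EL2
 * already target the EL1&0 regime, so loading the guest's VTTBR_EL2
 * (and with it the right VMID) is the only switch required.
 */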
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
                                                  struct tlb_inv_context *cxt)
{
        __load_guest_stage2(kvm);
        isb();
}

static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
                                             struct tlb_inv_context *cxt)
{
        if (has_vhe())
                __tlb_switch_to_guest_vhe(kvm, cxt);
        else
                __tlb_switch_to_guest_nvhe(kvm, cxt);
}

static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
                                                struct tlb_inv_context *cxt)
{
        /*
         * We're done with the TLB operation, let's restore the host's
         * view of HCR_EL2.
         */
        write_sysreg(0, vttbr_el2);
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        isb();

        if (cpus_have_const_cap(ARM64_WORKAROUND_1165522)) {
                /* Restore the registers to what they were */
                write_sysreg_el1(cxt->tcr, SYS_TCR);
                write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
        }

        local_irq_restore(cxt->flags);
}

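/*
 * On nVHE, the host does not run with stage-2 translation enabled;
 * clearing VTTBR_EL2 simply drops the guest's VMID.
 */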
static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
                                                 struct tlb_inv_context *cxt)
{
        write_sysreg(0, vttbr_el2);
}

static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
                                            struct tlb_inv_context *cxt)
{
        if (has_vhe())
                __tlb_switch_to_host_vhe(kvm, cxt);
        else
                __tlb_switch_to_host_nvhe(kvm, cxt);
}

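/*
 * Invalidate the stage-2 TLB entries covering @ipa for this VM's VMID,
 * across the inner shareable domain, along with the whole of stage-1
 * (see the comments below for why).
 */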
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        struct tlb_inv_context cxt;

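        /* Make the prior page-table updates visible to the walkers */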
        dsb(ishst);

        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
        __tlb_switch_to_guest(kvm, &cxt);

        /*
         * We could do so much better if we had the VA as well.
         * Instead, we invalidate Stage-2 for this IPA, and the
         * whole of Stage-1. Weep...
         */
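        /*
         * TLBI IPAS2E1IS expects the IPA right-shifted by 12 bits,
         * regardless of the configured page size.
         */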
        ipa >>= 12;
        __tlbi(ipas2e1is, ipa);

        /*
         * We have to ensure completion of the invalidation at Stage-2,
         * since a table walk on another CPU could refill a TLB with a
         * complete (S1 + S2) walk based on the old Stage-2 mapping if
         * the Stage-1 invalidation happened first.
         */
        dsb(ish);
        __tlbi(vmalle1is);
        dsb(ish);
        isb();

        /*
         * If the host is running at EL1 and we have a VPIPT I-cache,
         * then we must perform I-cache maintenance at EL2 in order for
         * it to have an effect on the guest. Since the guest cannot hit
         * I-cache lines allocated with a different VMID, we don't need
         * to worry about junk out of guest reset (we nuke the I-cache on
         * VMID rollover), but we do need to be careful when remapping
         * executable pages for the same guest. This can happen when KSM
         * takes a CoW fault on an executable page, copies the page into
         * a page that was previously mapped in the guest and then needs
         * to invalidate the guest view of the I-cache for that page
         * from EL1. To solve this, we invalidate the entire I-cache when
         * unmapping a page from a guest if we have a VPIPT I-cache but
         * the host is running at EL1. As above, we could do better if
         * we had the VA.
         *
         * The moral of this story is: if you have a VPIPT I-cache, then
         * you should be running with VHE enabled.
         */
        if (!has_vhe() && icache_is_vpipt())
                __flush_icache_all();

        __tlb_switch_to_host(kvm, &cxt);
}

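/*
 * Invalidate all stage-1 and stage-2 TLB entries for this VM's VMID,
 * across the inner shareable domain.
 */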
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
        struct tlb_inv_context cxt;

        dsb(ishst);

        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
        __tlb_switch_to_guest(kvm, &cxt);

        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}

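/*
 * Flush this CPU's TLB entries and I-cache for the vCPU's VMID. Only
 * local completion is enforced (dsb nsh), so this is strictly a
 * same-CPU operation.
 */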
void __hyp_text __kvm_flush_cpu_context(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
        struct tlb_inv_context cxt;

        /* Switch to requested VMID */
        __tlb_switch_to_guest(kvm, &cxt);

        __tlbi(vmalle1);
        asm volatile("ic iallu");
        dsb(nsh);
        isb();

        __tlb_switch_to_host(kvm, &cxt);
}

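/*
 * Nuke the EL1&0 TLB entries for all VMIDs on all CPUs; called on VMID
 * rollover so that no stale translation can survive into a recycled
 * VMID.
 */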
void __hyp_text __kvm_flush_vm_context(void)
{
        dsb(ishst);
        __tlbi(alle1is);

        /*
         * VIPT and PIPT caches are not affected by VMID, so no maintenance
         * is necessary across a VMID rollover.
         *
         * VPIPT caches constrain lookup and maintenance to the active VMID,
         * so we need to invalidate lines with a stale VMID to avoid an ABA
         * race after multiple rollovers.
         */
        if (icache_is_vpipt())
                asm volatile("ic ialluis");

        dsb(ish);
}