/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include <asm/pvclock.h>
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"

#define KVM_DEFAULT_PLE_GAP		128
#define KVM_VMX_DEFAULT_PLE_WINDOW	4096
#define KVM_DEFAULT_PLE_WINDOW_GROW	2
#define KVM_DEFAULT_PLE_WINDOW_SHRINK	0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX	UINT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX	USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW	3000

static inline unsigned int __grow_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int max)
{
	u64 ret = val;

	if (modifier < 1)
		return base;

	if (modifier < base)
		ret *= modifier;
	else
		ret += modifier;

	return min(ret, (u64)max);
}

static inline unsigned int __shrink_ple_window(unsigned int val,
		unsigned int base, unsigned int modifier, unsigned int min)
{
	if (modifier < 1)
		return base;

	if (modifier < base)
		val /= modifier;
	else
		val -= modifier;

	return max(val, min);
}
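
/*
 * Illustrative sketch (not part of the kernel build): how a vendor module
 * such as VMX might use the helpers above after a PLE-triggered exit. A
 * modifier smaller than the base window scales multiplicatively, otherwise
 * it is applied additively, and the result is clamped to the vendor limit:
 *
 *	unsigned int old = 4096;
 *	unsigned int new = __grow_ple_window(old, KVM_VMX_DEFAULT_PLE_WINDOW,
 *					     KVM_DEFAULT_PLE_WINDOW_GROW,
 *					     KVM_VMX_DEFAULT_PLE_WINDOW_MAX);
 *
 * With the default grow modifier of 2 (< base), new == 8192; the u64
 * intermediate in __grow_ple_window() keeps the multiply from overflowing
 * before the clamp.
 */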

#define MSR_IA32_CR_PAT_DEFAULT	0x0007040600070406ULL

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
	vcpu->arch.exception.injected = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
	bool soft)
{
	vcpu->arch.interrupt.injected = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.injected = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.injected || vcpu->arch.interrupt.injected ||
		vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
	int cs_db, cs_l;

	if (!is_long_mode(vcpu))
		return false;
	kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
	return cs_l;
}

static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return (vcpu->arch.efer & EFER_LMA) &&
		kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
#else
	return 0;
#endif
}

static inline bool x86_exception_has_error_code(unsigned int vector)
{
	static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
			BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
			BIT(PF_VECTOR) | BIT(AC_VECTOR);

	return (1U << vector) & exception_has_error_code;
}
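
/*
 * Illustrative sketch (not part of the kernel build): the bitmap above turns
 * "does this exception push an error code?" into a single AND, e.g.:
 *
 *	x86_exception_has_error_code(PF_VECTOR);  -> nonzero (#PF has one)
 *	x86_exception_has_error_code(DE_VECTOR);  -> 0 (#DE does not)
 */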

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops.tlb_flush_current(vcpu);
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline bool is_pae_paging(struct kvm_vcpu *vcpu)
{
	return !is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu);
}

static inline u8 vcpu_virt_addr_bits(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_LA57) ? 57 : 48;
}

static inline u64 get_canonical(u64 la, u8 vaddr_bits)
{
	return ((int64_t)la << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static inline bool is_noncanonical_address(u64 la, struct kvm_vcpu *vcpu)
{
	return get_canonical(la, vcpu_virt_addr_bits(vcpu)) != la;
}
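
/*
 * Illustrative sketch (not part of the kernel build): get_canonical()
 * sign-extends bit (vaddr_bits - 1) into the upper bits, so an address is
 * canonical iff that extension is a no-op. With 48 virtual address bits:
 *
 *	get_canonical(0x00007fffffffffffULL, 48) == 0x00007fffffffffffULL
 *		(unchanged, so the address is canonical)
 *	get_canonical(0x0000800000000000ULL, 48) == 0xffff800000000000ULL
 *		(changed, so is_noncanonical_address() returns true)
 */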

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	u64 gen = kvm_memslots(vcpu->kvm)->generation;

	if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
		return;

	/*
	 * If this is a shadow nested page table, the "GVA" is
	 * actually a nGPA.
	 */
	vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;
	vcpu->arch.mmio_access = access;
	vcpu->arch.mmio_gfn = gfn;
	vcpu->arch.mmio_gen = gen;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
	    vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
	    vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}
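
/*
 * Illustrative sketch (not part of the kernel build): a typical life cycle
 * of the one-entry MMIO cache above. The emulator fills the cache after a
 * translation, later lookups hit while the memslot generation is unchanged,
 * and the entry is dropped when the mapping may have changed:
 *
 *	vcpu_cache_mmio_info(vcpu, gva, gpa >> PAGE_SHIFT, access);
 *	...
 *	if (vcpu_match_mmio_gva(vcpu, gva))
 *		reuse vcpu->arch.mmio_gfn/mmio_access, skipping a page walk
 *	...
 *	vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);  on invalidation
 */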

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read(vcpu, reg);

	return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
				       int reg, unsigned long val)
{
	if (!is_64_bit_mode(vcpu))
		val = (u32)val;
	return kvm_register_write(vcpu, reg, val);
}

static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
{
	return !(kvm->arch.disabled_quirks & quirk);
}

static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
{
	return is_smm(vcpu) || kvm_x86_ops.apic_init_signal_blocked(vcpu);
}

void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);

int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int handle_ud(struct kvm_vcpu *vcpu);

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);

void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num);
bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
				    void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			    int emulation_type, void *insn, int insn_len);
fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);

extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 supported_xss;

static inline bool kvm_mpx_supported(void)
{
	return (supported_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		== (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
}

extern unsigned int min_timer_period_us;

extern bool enable_vmware_backdoor;

extern int pi_inject_timer;

extern struct static_key kvm_no_apic_vcpu;

static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
				   vcpu->arch.virtual_tsc_shift);
}

/* Same "calling convention" as do_div:
 * - divide (n << 32) by base
 * - put result in n
 * - return remainder
 */
#define do_shl32_div32(n, base)					\
	({							\
	    u32 __quot, __rem;					\
	    asm("divl %2" : "=a" (__quot), "=d" (__rem)		\
		: "rm" (base), "0" (0), "1" ((u32) n));		\
	    n = __quot;						\
	    __rem;						\
	 })
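
/*
 * Illustrative sketch (not part of the kernel build): do_shl32_div32()
 * computes (n << 32) / base with a single divl (EDX:EAX = n:0 as the
 * dividend), which is handy for deriving 32.32 fixed-point scaling factors.
 * For example, expressing the ratio 1/3 as a 32.32 fraction:
 *
 *	u32 n = 1;
 *	u32 rem = do_shl32_div32(n, 3);
 *
 * Afterwards n == 0x55555555 (~1/3 in 32.32 fixed point) and rem == 1,
 * matching do_div()'s quotient-in-place, remainder-returned convention.
 */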

static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
	return kvm->arch.mwait_in_guest;
}

static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
	return kvm->arch.hlt_in_guest;
}

static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
	return kvm->arch.pause_in_guest;
}

static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
	return kvm->arch.cstate_in_guest;
}

DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);

static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, vcpu);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
	__this_cpu_write(current_vcpu, NULL);
}

static inline bool kvm_pat_valid(u64 data)
{
	if (data & 0xF8F8F8F8F8F8F8F8ull)
		return false;
	/* 0, 1, 4, 5, 6, 7 are valid values.  */
	return (data | ((data & 0x0202020202020202ull) << 1)) == data;
}
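
/*
 * Illustrative sketch (not part of the kernel build): each of the eight PAT
 * entries is a 3-bit memory type, and 2 and 3 are the only reserved
 * encodings. The first check in kvm_pat_valid() rejects any entry with bits
 * above bit 2; the second rejects entries with bit 1 set unless bit 2 is
 * also set, which filters exactly 2 (0b010) and 3 (0b011):
 *
 *	kvm_pat_valid(MSR_IA32_CR_PAT_DEFAULT)	 true (types 6, 4, 7, 0)
 *	kvm_pat_valid(0x0000000000000002ULL)	 false (entry 0 is type 2)
 */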

static inline bool kvm_dr7_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

static inline bool kvm_dr6_valid(u64 data)
{
	/* Bits [63:32] are reserved */
	return !(data >> 32);
}

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
int kvm_spec_ctrl_test_value(u64 value);
int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);

/*
 * Internal error codes that are used to indicate that MSR emulation encountered
 * an error that should result in #GP in the guest, unless userspace
 * handles it.
 */
#define KVM_MSR_RET_INVALID	2	/* in-kernel MSR emulation #GP condition */
#define KVM_MSR_RET_FILTERED	3	/* #GP due to userspace MSR filter */

#define __cr4_reserved_bits(__cpu_has, __c)		\
({							\
	u64 __reserved_bits = CR4_RESERVED_BITS;	\
							\
	if (!__cpu_has(__c, X86_FEATURE_XSAVE))		\
		__reserved_bits |= X86_CR4_OSXSAVE;	\
	if (!__cpu_has(__c, X86_FEATURE_SMEP))		\
		__reserved_bits |= X86_CR4_SMEP;	\
	if (!__cpu_has(__c, X86_FEATURE_SMAP))		\
		__reserved_bits |= X86_CR4_SMAP;	\
	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE))	\
		__reserved_bits |= X86_CR4_FSGSBASE;	\
	if (!__cpu_has(__c, X86_FEATURE_PKU))		\
		__reserved_bits |= X86_CR4_PKE;		\
	if (!__cpu_has(__c, X86_FEATURE_LA57))		\
		__reserved_bits |= X86_CR4_LA57;	\
	if (!__cpu_has(__c, X86_FEATURE_UMIP))		\
		__reserved_bits |= X86_CR4_UMIP;	\
	if (!__cpu_has(__c, X86_FEATURE_VMX))		\
		__reserved_bits |= X86_CR4_VMXE;	\
	if (!__cpu_has(__c, X86_FEATURE_PCID))		\
		__reserved_bits |= X86_CR4_PCIDE;	\
	__reserved_bits;				\
})
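
/*
 * Illustrative sketch (not part of the kernel build): __cr4_reserved_bits()
 * takes a feature-query callback plus its argument, so the same macro can
 * derive the reserved-bit mask from host CPUID or from a guest's CPUID.
 * A CR4 validity check might look like this (guest_cpuid_has is assumed to
 * be the per-vCPU CPUID lookup used elsewhere in KVM):
 *
 *	if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
 *		return -EINVAL;
 */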

#endif