// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

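/*
 * Compute the size in bytes of the XSAVE area needed for the xstate
 * features set in @xstate_bv.  With the compacted format each enabled
 * feature's state is packed directly after the previous one; with the
 * standard format each feature's offset comes from CPUID.0xD.<bit>.EBX.
 * Either way the result is the end of the highest enabled feature's state.
 */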
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

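/*
 * MPX can be exposed to the guest only if both bound-register and
 * bound-config state are supported in the host's XCR0 and the vendor
 * module (VMX/SVM) reports MPX support.
 */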
bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

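/* Mask of XCR0 features that KVM can expose to a guest on this host. */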
u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

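/*
 * Re-resolve the vCPU's CPUID state after userspace sets the entries or a
 * relevant MSR/CR changes: refresh dynamic bits (OSXSAVE, OSPKE, APIC,
 * MWAIT), recompute the guest's supported XCR0 and XSAVE size, and sanity
 * check the advertised virtual-address width.
 */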
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The existing code assumes virtual addresses are 48-bit or 57-bit
	 * wide in the canonical address checks; exit if that is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
		(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best) {
			if (vcpu->arch.ia32_misc_enable_msr & MSR_IA32_MISC_ENABLE_MWAIT)
				best->ecx |= F(MWAIT);
			else
				best->ecx &= ~F(MWAIT);
		}
	}

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}

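/* Check whether EFER.NX is enabled on the CPU running this code. */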
static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

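/*
 * If the host runs without EFER.NX (e.g. because the hardware lacks it
 * or it was disabled at boot), NX cannot be provided to the guest
 * either, so strip the bit from the guest's 0x80000001 leaf.
 */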
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

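/*
 * Physical-address width for the guest, taken from CPUID.0x80000008.EAX
 * when the extended leaves are present, otherwise defaulting to 36 bits.
 */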
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

/*
 * Legacy KVM_SET_CPUID: convert old-style kvm_cpuid_entry structures
 * from userspace into the kvm_cpuid_entry2 format used internally.
 */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	if (cpuid->nent) {
		cpuid_entries =
			vmalloc(array_size(sizeof(struct kvm_cpuid_entry),
					   cpuid->nent));
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

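/*
 * Fill one CPUID entry directly from the host CPU via the CPUID
 * instruction, and flag the leaves that KVM treats as stateful or as
 * having a significant index.
 */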
static void do_host_cpuid(struct kvm_cpuid_entry2 *entry, u32 function,
			  u32 index)
{
	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 2:
		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		break;
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}
}

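/*
 * Leaves reported by KVM_GET_EMULATED_CPUID: features KVM can emulate
 * in software even when the host CPU lacks them (currently MOVBE and
 * RDPID).
 */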
static int __do_cpuid_func_emulated(struct kvm_cpuid_entry2 *entry,
				    u32 func, int *nent, int maxnent)
{
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		entry->ecx = F(RDPID);
		++*nent;
	default:
		break;
	}

	return 0;
}

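/*
 * Mask a CPUID.7 subleaf down to the features KVM supports, then force
 * in the bits KVM emulates regardless of host support (TSC_ADJUST,
 * ARCH_CAPABILITIES).
 */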
static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
{
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;
	unsigned f_la57;

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | f_intel_pt;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR);

	/* cpuid 7.1.eax */
	const u32 kvm_cpuid_7_1_eax_x86_features =
		F(AVX512_BF16);

	switch (index) {
	case 0:
		entry->eax = min(entry->eax, 1u);
		entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
		/* TSC_ADJUST is emulated */
		entry->ebx |= F(TSC_ADJUST);

		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
		f_la57 = entry->ecx & F(LA57);
		cpuid_mask(&entry->ecx, CPUID_7_ECX);
		/* Set LA57 based on hardware capability. */
		entry->ecx |= f_la57;
		entry->ecx |= f_umip;
		/* PKU is not yet implemented for shadow paging. */
		if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
			entry->ecx &= ~F(PKU);

		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_7_EDX);
		if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
			entry->edx |= F(SPEC_CTRL);
		if (boot_cpu_has(X86_FEATURE_STIBP))
			entry->edx |= F(INTEL_STIBP);
		if (boot_cpu_has(X86_FEATURE_SSBD))
			entry->edx |= F(SPEC_CTRL_SSBD);
		/*
		 * We emulate ARCH_CAPABILITIES in software even
		 * if the host doesn't support it.
		 */
		entry->edx |= F(ARCH_CAPABILITIES);
		break;
	case 1:
		entry->eax &= kvm_cpuid_7_1_eax_x86_features;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	default:
		WARN_ON_ONCE(1);
		entry->eax = 0;
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
}

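/*
 * Build the KVM_GET_SUPPORTED_CPUID view of one leaf (plus any
 * subleaves): start from the host's values via do_host_cpuid() and then
 * mask and adjust them, per leaf, to what KVM can actually virtualize.
 */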
static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
				  int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
	unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0;

	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved */ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_host_cpuid(entry, function, 0);
	++*nent;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/*
		 * We support x2apic emulation even if the host does not
		 * support it, since we emulate x2apic in software.
		 */
		entry->ecx |= F(X2APIC);
		break;
	/*
	 * Function 2 entries are STATEFUL.  That is, repeated cpuid commands
	 * may return different values.  This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying
	 * behavior in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT.
	 */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[t], function, 0);
			++*nent;
		}
		break;
	}
	/* Functions 4 and 0x8000001d have an additional index. */
	case 4:
	case 0x8000001d: {
		int i, cache_type;

		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* Function 7 has an additional index. */
	case 7: {
		int i;

		for (i = 0; ; ) {
			do_cpuid_7_mask(&entry[i], i);
			if (i == entry->eax)
				break;
			if (*nent >= maxnent)
				goto out;

			++i;
			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/*
	 * Per Intel's SDM, leaf 0x1f is a superset of leaf 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb: {
		int i;

		/*
		 * We filled in entry[0] for CPUID(EAX=<function>,
		 * ECX=00H) above. If its level type (ECX[15:8]) is
		 * zero, then the leaf is unimplemented, and we're
		 * done. Otherwise, continue to populate entries
		 * until the level type (ECX[15:8]) of the previously
		 * added entry is zero.
		 */
		for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[i], function, i);
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_host_cpuid(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			++*nent;
			++i;
		}
		break;
	}
	/* Intel PT */
	case 0x14: {
		int t, times = entry->eax;

		if (!f_intel_pt)
			break;

		for (t = 1; t <= times; ++t) {
			if (*nent >= maxnent)
				goto out;
			do_host_cpuid(&entry[t], function, t);
			++*nent;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * AMD has separate bits for each SPEC_CTRL bit.
		 * arch/x86/kernel/cpu/bugs.c is kind enough to
		 * record those in cpufeatures, so use them.
		 */
		if (boot_cpu_has(X86_FEATURE_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_STIBP))
			entry->ebx |= F(AMD_STIBP);
		if (boot_cpu_has(X86_FEATURE_SSBD))
			entry->ebx |= F(AMD_SSBD);
		if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			entry->ebx |= F(AMD_SSB_NO);
		/*
		 * The preference is to use the SPEC_CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
			 int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(entry, func, nent, maxnent);

	return __do_cpuid_func(entry, func, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to settle for checking only the emulated side. /me sheds a
	 * tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

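/*
 * KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID: for each range
 * (basic, extended, Centaur, KVM paravirt), enumerate the base leaf and
 * then every following leaf up to the limit the base leaf reports in EAX.
 */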
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0 },
		{ .func = 0x80000000 },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu },
		{ .func = KVM_CPUID_SIGNATURE },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_func(&cpuid_entries[nent], ent->func,
				  &nent, cpuid->nent, type);

		if (r)
			goto out_free;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_func(&cpuid_entries[nent], func,
					  &nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

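/*
 * For stateful leaves (currently only function 2), move the
 * STATE_READ_NEXT marker from entry i to the next entry with the same
 * function so that repeated CPUID executions cycle through the entries.
 */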
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}

/*
 * Find an entry with matching function, matching index (if needed), and
 * that should be read next (if it's stateful).
 */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If the basic or extended CPUID leaf requested is higher than the
 * maximum supported basic or extended leaf, respectively, then it is
 * out of range.
 */
static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
{
	struct kvm_cpuid_entry2 *max;

	max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	return max && function <= max->eax;
}

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	struct kvm_cpuid_entry2 *max;
	bool found;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	found = entry;
	/*
	 * Intel CPUID semantics treats any query for an out-of-range
	 * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
	 * requested. AMD CPUID semantics returns all zeroes for any
	 * undefined leaf, whether or not the leaf is in range.
	 */
	if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
	    !cpuid_function_in_range(vcpu, function)) {
		max = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (max) {
			function = max->eax;
			entry = kvm_find_cpuid_entry(vcpu, function, index);
		}
	}
	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves. Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists. EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
	return found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

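/*
 * Emulate the CPUID instruction for the guest: read the requested
 * function and index from RAX/RCX, resolve them against the vCPU's
 * CPUID table, and write the result back to RAX/RBX/RCX/RDX.
 */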
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);