/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_NESTED_H
#define __KVM_X86_VMX_NESTED_H

#include "kvm_cache_regs.h"
#include "vmcs12.h"
#include "vmx.h"

/*
 * Status returned by nested_vmx_enter_non_root_mode():
 */
enum nvmx_vmentry_status {
	NVMX_VMENTRY_SUCCESS,		/* Entered VMX non-root mode */
	NVMX_VMENTRY_VMFAIL,		/* Consistency check VMFail */
	NVMX_VMENTRY_VMEXIT,		/* Consistency check VMExit */
	NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
};

void vmx_leave_nested(struct kvm_vcpu *vcpu);
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
				bool apicv);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_vcpu_setup(void);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
							bool from_vmentry);
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
		       u32 exit_intr_info, unsigned long exit_qualification);
void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu);
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
			u32 vmx_instruction_info, bool wr, int len, gva_t *ret);

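/*
 * KVM keeps in-kernel copies of the vmcs12 that L1 currently has loaded and,
 * when L1 uses shadow VMCS, of the shadow vmcs12 it links to; the helpers
 * below return those cached copies.
 */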
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_vmcs12;
}

static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
}

static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * In case we do two consecutive get/set_nested_state()s while L2 was
	 * running, hv_evmcs may end up not being mapped (we map it from
	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
	 * have a valid vmcs12 when it is true.
	 */
	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
		vmx->nested.hv_evmcs;
}

static inline unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
{
	/* return the page table to be shadowed - in our case, EPT12 */
	return get_vmcs12(vcpu)->ept_pointer;
}

static inline bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
{
	return nested_ept_get_cr3(vcpu) & VMX_EPTP_AD_ENABLE_BIT;
}

/*
 * Reflect a VM Exit into L1.
 */
static inline int nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu,
					    u32 exit_reason)
{
	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);

	/*
	 * At this point, the exit interruption info in exit_intr_info
	 * is only valid for EXCEPTION_NMI exits. For EXTERNAL_INTERRUPT
	 * we need to query the in-kernel LAPIC.
	 */
	WARN_ON(exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT);
	if ((exit_intr_info &
	     (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
	    (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) {
		struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

		vmcs12->vm_exit_intr_error_code =
			vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
	}

	nested_vmx_vmexit(vcpu, exit_reason, exit_intr_info,
			  vmcs_readl(EXIT_QUALIFICATION));
	return 1;
}

/*
 * Return the cr0 value that a nested guest would read. This is a combination
 * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
 * its hypervisor (cr0_read_shadow).
 */
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
	return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
		(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
	return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
		(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}

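/*
 * Number of CR3-target values that L1 is told it may use, as reported to it
 * in the virtual IA32_VMX_MISC MSR.
 */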
static inline unsigned nested_cpu_vmx_misc_cr3_count(struct kvm_vcpu *vcpu)
{
	return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low);
}

/*
 * Do the virtual VMX capability MSRs specify that L1 can use VMWRITE
 * to modify any valid field of the VMCS, or are the VM-exit
 * information fields read-only?
 */
static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low &
		MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}

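/*
 * Does the virtual IA32_VMX_MISC MSR tell L1 that it may inject a software
 * interrupt/exception with an instruction length of zero?
 */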
static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
}

static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
		CPU_BASED_MONITOR_TRAP_FLAG;
}

static inline bool nested_cpu_has_vmx_shadow_vmcs(struct kvm_vcpu *vcpu)
{
	return to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
		SECONDARY_EXEC_SHADOW_VMCS;
}

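/*
 * Has L1 set the given primary processor-based VM-execution control in its
 * vmcs12?
 */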
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
	return vmcs12->cpu_based_vm_exec_control & bit;
}

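/*
 * Has L1 both activated the secondary controls and set the given secondary
 * processor-based VM-execution control in its vmcs12?
 */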
static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
	return (vmcs12->cpu_based_vm_exec_control &
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
		(vmcs12->secondary_vm_exec_control & bit);
}

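/*
 * The helpers below report whether L1 has enabled a particular pin-based,
 * secondary or VM-exit control, or the VMFUNC EPTP-switching function, in
 * its vmcs12.
 */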
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control &
		PIN_BASED_VMX_PREEMPTION_TIMER;
}

static inline bool nested_cpu_has_nmi_exiting(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_NMI_EXITING;
}

static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}

static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}

static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
}

static inline bool nested_cpu_has_pml(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML);
}

static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}

static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}

static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}

static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}

static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}

static inline bool nested_cpu_has_vmfunc(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VMFUNC);
}

static inline bool nested_cpu_has_eptp_switching(struct vmcs12 *vmcs12)
{
	return nested_cpu_has_vmfunc(vmcs12) &&
		(vmcs12->vm_function_control &
		 VMX_VMFUNC_EPTP_SWITCHING);
}

static inline bool nested_cpu_has_shadow_vmcs(struct vmcs12 *vmcs12)
{
	return nested_cpu_has2(vmcs12, SECONDARY_EXEC_SHADOW_VMCS);
}

static inline bool nested_cpu_has_save_preemption_timer(struct vmcs12 *vmcs12)
{
	return vmcs12->vm_exit_controls &
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
}

/*
 * In nested virtualization, check if L1 asked to exit on external interrupts.
 * For most existing hypervisors, this will always return true.
 */
static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->pin_based_vm_exec_control &
		PIN_BASED_EXT_INTR_MASK;
}

/*
 * if fixed0[i] == 1: val[i] must be 1
 * if fixed1[i] == 0: val[i] must be 0
 */
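/*
 * For example (illustrative values, not taken from a real MSR pair): with
 * fixed0 = 0x1 and fixed1 = 0x3, val = 0x3 is accepted, val = 0x2 is
 * rejected because bit 0 must be 1, and val = 0x5 is rejected because
 * bit 2 must be 0.
 */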
static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

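/*
 * Validate a CR0 value for L2 against the virtual IA32_VMX_CR0_FIXED{0,1}
 * MSRs exposed to L1.  When unrestricted guest is available to and enabled
 * by L1, CR0.PE and CR0.PG are not required to be 1.
 */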
static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high &
	    SECONDARY_EXEC_UNRESTRICTED_GUEST &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(val, fixed0, fixed1);
}

static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0;
	u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1;

	return fixed_bits_valid(val, fixed0, fixed1);
}

/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid	nested_cr4_valid
#define nested_host_cr4_valid	nested_cr4_valid

#endif /* __KVM_X86_VMX_NESTED_H */