/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

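/*
 * Accessors between a vCPU and its Hyper-V emulation state: kvm_vcpu_hv is
 * embedded in kvm_vcpu_arch, so the reverse mappings below recover the
 * containing kvm_vcpu with container_of().
 */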
static inline struct kvm_vcpu_hv *vcpu_to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.hyperv;
}

static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu)
{
	struct kvm_vcpu_arch *arch;

	arch = container_of(hv_vcpu, struct kvm_vcpu_arch, hyperv);
	return container_of(arch, struct kvm_vcpu, arch);
}

static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu)
{
	return &vcpu->arch.hyperv.synic;
}

static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic));
}

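/*
 * The synthetic debugger state is per-VM (kvm->arch.hyperv), not per-vCPU,
 * so this helper goes through vcpu->kvm.
 */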
static inline struct kvm_hv_syndbg *vcpu_to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

bool kvm_hv_hypercall_enabled(struct kvm *kvm);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page);

static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
							int timer_index)
{
	return &vcpu_to_hv_vcpu(vcpu)->stimer[timer_index];
}

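/*
 * Recover the vCPU owning a synthetic timer: rewind the stimer pointer by its
 * index to reach stimer[0], then walk back up through kvm_vcpu_hv and
 * kvm_vcpu_arch via container_of() and hv_vcpu_to_vcpu().
 */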
static inline struct kvm_vcpu *stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu_to_vcpu(hv_vcpu);
}

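/* True if any of the vCPU's synthetic timers has an expiration pending. */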
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	return !bitmap_empty(vcpu->arch.hyperv.stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
				struct kvm_cpuid_entry2 __user *entries);

#endif