// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

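/*
 * Each general-purpose counter is programmed through a pair of MSRs: the
 * event select (EVNTSEL/CTL) and the counter value itself (PERFCTR/CTR).
 */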
enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

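/*
 * Slot numbers for the general-purpose counters; PERFCTR_CORE CPUs expose
 * six, legacy CPUs four.  INDEX_ERROR flags an MSR that maps to no slot.
 */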
enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

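/*
 * Pick the base MSR for @type: guests with PERFCTR_CORE in CPUID use the
 * F15h range (six interleaved CTL/CTR pairs), legacy guests the four
 * contiguous K7 EVNTSEL0-3/PERFCTR0-3 MSRs.
 */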
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

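/* Map a counter or event-select MSR to the slot of the counter it belongs to. */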
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

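/*
 * Return the PMC backing @msr, or NULL if @msr is not a PMU MSR or is of
 * the wrong kind (e.g. a counter MSR looked up as an event select).
 */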
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

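/*
 * Translate the guest's event select and unit mask into a generic perf
 * hardware event id; PERF_COUNT_HW_MAX means "no architectural match".
 */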
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

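	/*
	 * The high two bits of ECX are type-select flags on Intel (fixed
	 * counters, fast read); AMD has only general-purpose counters, so
	 * mask them off before range-checking the index.
	 */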
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

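/* Read a counter value or an event select on behalf of the guest. */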
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
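		/*
		 * Adjust the saved base so that a subsequent read returns
		 * @data without disturbing the running perf event.
		 */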
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

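/*
 * Size the PMU to what the guest's CPUID advertises.  AMD counters are
 * 48 bits wide, and there is no fixed-counter or global-status machinery
 * to expose.
 */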
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

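/*
 * Initialize all six possible counters up front; refresh() later limits
 * how many the guest may actually use.
 */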
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

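/* Stop every counter and clear its value and event select. */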
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

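/* Callbacks dispatched by KVM's common x86 PMU code for SVM guests. */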
struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};