// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance, the AMD RDPMC instruction uses 0000_0003h in ECX to
 *      access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism,
 *      except that it also supports fixed counters. idx can be used as an
 *      index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */

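/*
 * irq_work callback used to deliver a pending PMI from a context where it
 * could not be injected directly (see kvm_perf_overflow_intr()).
 */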
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

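/*
 * perf overflow handler for counters programmed without an interrupt on
 * overflow: record the overflow in global_status and ask KVM, via
 * KVM_REQ_PMU, to reprogram the counter.
 */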
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

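/*
 * perf overflow handler for counters programmed with an interrupt on
 * overflow: in addition to the bookkeeping done by kvm_perf_overflow(),
 * inject a PMI into the guest, deferring to irq_work when that cannot be
 * done safely from NMI context.
 */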
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest mode re-entry. Otherwise
		 * we can't be sure that the vcpu wasn't executing the hlt
		 * instruction at the time of the vmexit and is not going to
		 * re-enter guest mode until woken up. So we should wake it,
		 * but this is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

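/*
 * Create the host perf_event that backs a virtual PMC.  The event is pinned,
 * counts only while the guest is running (exclude_host) and honours the
 * user/kernel exclusion bits programmed by the guest.
 */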
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  u64 config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
}

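/* Pause the backing perf_event and fold its current count into pmc->counter. */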
static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
}

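/*
 * Try to re-enable the existing perf_event for an unchanged configuration
 * instead of recreating it; returns false if perf rejects the recomputed
 * sample period.
 */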
static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse perf_event to serve as pmc_reprogram_counter() does */
	perf_event_enable(pmc->perf_event);

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

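/*
 * Reprogram a general purpose counter after the guest has written its event
 * select MSR, honouring the VM's PMU event filter if one is installed.
 */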
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	u64 config;
	u32 type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    i == filter->nevents)
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    i < filter->nevents)
			allow_event = false;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & AMD64_RAW_EVENT_MASK;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

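/*
 * Reprogram a fixed counter after the guest has updated its field in the
 * fixed counter control MSR; ctrl carries the enable bits and the PMI flag.
 */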
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

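/* Reprogram the counter identified by its global PMC index, whether gp or fixed. */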
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

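/*
 * Handle a deferred KVM_REQ_PMU: reprogram every counter whose bit is set in
 * reprogram_pmi, then release perf_events that went unused during the last
 * vCPU time slice if cleanup was requested.
 */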
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

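/*
 * VMware exposes pseudo-PMCs at these well-known RDPMC indices; they return
 * time values rather than the contents of real performance counters.
 */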
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

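/*
 * Emulate RDPMC.  Bit 31 of the guest's ECX selects "fast" mode, which
 * truncates the result to 32 bits; the access is rejected when CR4.PCE is
 * clear for a non-privileged caller in protected mode.
 */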
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (kvm_x86_ops.get_cpl(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/* refresh PMU settings. This function is generally called when underlying
 * settings change (such as changes of the PMU CPUID by guest VMs), which
 * should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

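/*
 * A PMC is considered speculatively in use if the guest has enabled it in
 * the relevant control MSR, regardless of whether a perf_event currently
 * backs it.
 */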
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
			pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

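/*
 * Handle the KVM_SET_PMU_EVENT_FILTER ioctl: install (or replace) the per-VM
 * allow/deny list consulted by reprogram_gp_counter() and
 * reprogram_fixed_counter().
 */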
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}