// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);

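/*
 * Passed via perf_event_attr::config1 when creating the backing perf event
 * for a chained pair of guest counters (see kvm_pmu_create_perf_event()),
 * so that the host arm_pmu driver treats the event as a single 64-bit
 * counter.
 */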
#define PERF_ATTR_CFG1_KVM_PMU_CHAINED 0x1

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
        switch (kvm->arch.pmuver) {
        case ID_AA64DFR0_PMUVER_8_0:
                return GENMASK(9, 0);
        case ID_AA64DFR0_PMUVER_8_1:
        case ID_AA64DFR0_PMUVER_8_4:
        case ID_AA64DFR0_PMUVER_8_5:
                return GENMASK(15, 0);
        default:                /* Shouldn't be here, just for sanity */
                WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
                return 0;
        }
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

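        /*
         * pmc->idx is also the index of this pmc within the vcpu's pmc[]
         * array, so rewinding the pointer by idx lands on pmc[0], from which
         * container_of() recovers the enclosing kvm_pmu, then the
         * kvm_vcpu_arch, and finally the vcpu itself.
         */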
        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_pmc_is_chained - determine if the pmc is chained
 * @pmc: The PMU counter pointer
 */
static bool kvm_pmu_pmc_is_chained(struct kvm_pmc *pmc)
{
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);

        return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_idx_is_high_counter - determine if select_idx is a high/low counter
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_high_counter(u64 select_idx)
{
        return select_idx & 0x1;
}

/**
 * kvm_pmu_get_canonical_pmc - obtain the canonical pmc
 * @pmc: The PMU counter pointer
 *
 * When a pair of PMCs are chained together we use the low counter (canonical)
 * to hold the underlying perf event.
 */
static struct kvm_pmc *kvm_pmu_get_canonical_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;

        return pmc;
}

static struct kvm_pmc *kvm_pmu_get_alternate_pmc(struct kvm_pmc *pmc)
{
        if (kvm_pmu_idx_is_high_counter(pmc->idx))
                return pmc - 1;
        else
                return pmc + 1;
}

/**
 * kvm_pmu_idx_has_chain_evtype - determine if the event type is chain
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_has_chain_evtype(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 eventsel, reg;

        select_idx |= 0x1;

        if (select_idx == ARMV8_PMU_CYCLE_IDX)
                return false;

        reg = PMEVTYPER0_EL0 + select_idx;
        eventsel = __vcpu_sys_reg(vcpu, reg) & kvm_pmu_event_mask(vcpu->kvm);

        return eventsel == ARMV8_PMUV3_PERFCTR_CHAIN;
}

/**
 * kvm_pmu_get_pair_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 */
static u64 kvm_pmu_get_pair_counter_value(struct kvm_vcpu *vcpu,
                                          struct kvm_pmc *pmc)
{
        u64 counter, counter_high, reg, enabled, running;

        if (kvm_pmu_pmc_is_chained(pmc)) {
                pmc = kvm_pmu_get_canonical_pmc(pmc);
                reg = PMEVCNTR0_EL0 + pmc->idx;

                counter = __vcpu_sys_reg(vcpu, reg);
                counter_high = __vcpu_sys_reg(vcpu, reg + 1);

                counter = lower_32_bits(counter) | (counter_high << 32);
        } else {
                reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
                counter = __vcpu_sys_reg(vcpu, reg);
        }

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        return counter;
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
        else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
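        /*
         * Only the delta is stored: the saved register value plus whatever
         * the backing perf event goes on to count must add up to the value
         * requested by the guest.
         */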
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 counter, reg, val;

        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
                return;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
                reg = PMCCNTR_EL0;
                val = counter;
        } else {
                reg = PMEVCNTR0_EL0 + pmc->idx;
                val = lower_32_bits(counter);
        }

        __vcpu_sys_reg(vcpu, reg) = val;

        if (kvm_pmu_pmc_is_chained(pmc))
                __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter idx for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        for_each_set_bit(i, &mask, 32)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);

        bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

/**
 * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
 * @vcpu: The vcpu pointer
 *
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);
        irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /* A change in the enable state may affect the chain state */
                kvm_pmu_update_pmc_chained(vcpu, i);
                kvm_pmu_create_perf_event(vcpu, i);

                /* At this point, pmc must be the canonical */
                if (pmc->perf_event) {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("fail to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                /* A change in the enable state may affect the chain state */
                kvm_pmu_update_pmc_chained(vcpu, i);
                kvm_pmu_create_perf_event(vcpu, i);

                /* At this point, pmc must be the canonical */
                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

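        /*
         * A counter contributes to the overflow interrupt only when the PMU
         * is globally enabled (PMCR_EL0.E) and the counter is implemented,
         * enabled, unmasked and has its overflow flag set.
         */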
        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
                reg &= kvm_pmu_valid_counter_mask(vcpu);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU bit of the device IRQ bitmap for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event. This is why we need a callback to do it
 * once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
        struct kvm_vcpu *vcpu;
        struct kvm_pmu *pmu;

        pmu = container_of(work, struct kvm_pmu, overflow_work);
        vcpu = kvm_pmc_to_vcpu(pmu->pmc);

        kvm_vcpu_kick(vcpu);
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = -(local64_read(&perf_event->count));

        if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                period &= GENMASK(31, 0);

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

                if (!in_nmi())
                        kvm_vcpu_kick(vcpu);
                else
                        irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        val &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
                u64 type, reg;

                if (!(val & BIT(i)))
                        continue;

                /* PMSWINC only applies to ... SW_INC! */
                type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
                type &= kvm_pmu_event_mask(vcpu->kvm);
                if (type != ARMV8_PMUV3_PERFCTR_SW_INCR)
                        continue;

                /* Increment this counter, which is configured for SW_INC */
                reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
                reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

                if (reg) /* no overflow on the low part */
                        continue;

                if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) {
                        /* increment the high counter */
                        reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) + 1;
                        reg = lower_32_bits(reg);
                        __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i + 1) = reg;
                        if (!reg) /* mark overflow on the high counter */
                                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i + 1);
                } else {
                        /* mark overflow on low counter */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);
                }
        }
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        int i;

        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
        } else {
                kvm_pmu_disable_counter_mask(vcpu, mask);
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

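        /*
         * PMCR_EL0.P resets the event counters only; the cycle counter is
         * reset separately via PMCR_EL0.C above.
         */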
        if (val & ARMV8_PMU_PMCR_P) {
                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        /*
         * For chained counters the event type and filtering attributes are
         * obtained from the low/even counter. We also use this counter to
         * determine if the event is enabled/disabled.
         */
        pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]);

        reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        else
                eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

        /* Software increment event doesn't need to be backed by a perf event */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR)
                return;

        /*
         * If we have a filter in place and the event isn't allowed, do
         * not install a perf event either.
         */
        if (vcpu->kvm->arch.pmu_filter &&
            !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = eventsel;

        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);

        if (kvm_pmu_pmc_is_chained(pmc)) {
                /*
                 * The initial sample period (overflow count) of an event. For
                 * chained counters we only support overflow interrupts on the
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
                attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
                        attr.sample_period = (-counter) & GENMASK(63, 0);
                else
                        attr.sample_period = (-counter) & GENMASK(31, 0);

                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow, pmc);
        }

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

/**
 * kvm_pmu_update_pmc_chained - update chained bitmap
 * @vcpu: The vcpu pointer
 * @select_idx: The number of selected counter
 *
 * Update the chained bitmap based on the event type written in the
 * typer register and the enable state of the odd register.
 */
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc;
        bool new_state, old_state;

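        /*
         * A counter pair is chained only while the odd counter is programmed
         * with the CHAIN event and that odd counter is enabled; otherwise the
         * two halves run as independent 32-bit counters.
         */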
        old_state = kvm_pmu_pmc_is_chained(pmc);
        new_state = kvm_pmu_idx_has_chain_evtype(vcpu, pmc->idx) &&
                    kvm_pmu_counter_is_enabled(vcpu, pmc->idx | 0x1);

        if (old_state == new_state)
                return;

        canonical_pmc = kvm_pmu_get_canonical_pmc(pmc);
        kvm_pmu_stop_counter(vcpu, canonical_pmc);
        if (new_state) {
                /*
                 * During promotion from !chained to chained we must ensure
                 * the adjacent counter is stopped and its event destroyed
                 */
                kvm_pmu_stop_counter(vcpu, kvm_pmu_get_alternate_pmc(pmc));
                set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
                return;
        }
        clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained);
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call perf_event API to
 * emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, mask;

        mask = ARMV8_PMU_EVTYPE_MASK;
        mask &= ~ARMV8_PMU_EVTYPE_EVENT;
        mask |= kvm_pmu_event_mask(vcpu->kvm);

        reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
              ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

        __vcpu_sys_reg(vcpu, reg) = data & mask;

        kvm_pmu_update_pmc_chained(vcpu, select_idx);
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

static int kvm_pmu_probe_pmuver(void)
{
        struct perf_event_attr attr = { };
        struct perf_event *event;
        struct arm_pmu *pmu;
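        /*
         * 0xf (the IMPLEMENTATION DEFINED PMUVer encoding) is treated by the
         * callers of this function as "no usable PMU".
         */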
        int pmuver = 0xf;

        /*
         * Create a dummy event that only counts user cycles. As we'll never
         * leave this function with the event being live, it will never
         * count anything. But it allows us to probe some of the PMU
         * details. Yes, this is terrible.
         */
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = 0;
        attr.exclude_user = 0;
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;
        attr.exclude_host = 1;
        attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        attr.sample_period = GENMASK(63, 0);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, &attr);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return 0xf;
        }

        if (event->pmu) {
                pmu = to_arm_pmu(event->pmu);
                if (pmu->pmuver)
                        pmuver = pmu->pmuver;
        }

        perf_event_disable(event);
        perf_event_release_kernel(event);

        return pmuver;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
        int base, i, nr_events;

        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
                /*
                 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
                 * as RAZ
                 */
                if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
                        val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }

        if (!bmap)
                return val;

        nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

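        /*
         * Fold the VM's event filter into the PMCEID value: bits [31:0]
         * describe event numbers base..base+31 and bits [63:32] the extended
         * range starting at 0x4000+base, so pull the filter in 8 bits at a
         * time.
         */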
        for (i = 0; i < 32; i += 8) {
                u64 byte;

                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
                if (nr_events >= (0x4000 + base + 32)) {
                        byte = bitmap_get_value8(bmap, 0x4000 + base + i);
                        mask |= byte << (32 + i);
                }
        }

        return val & mask;
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!vcpu->arch.pmu.created)
                return -EINVAL;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;
                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        init_irq_work(&vcpu->arch.pmu.overflow_work,
                      kvm_pmu_perf_overflow_notify_vcpu);

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        if (!vcpu->kvm->arch.pmuver)
                vcpu->kvm->arch.pmuver = kvm_pmu_probe_pmuver();

        if (vcpu->kvm->arch.pmuver == 0xf)
                return -ENODEV;

        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(vcpu->kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_FILTER: {
                struct kvm_pmu_event_filter __user *uaddr;
                struct kvm_pmu_event_filter filter;
                int nr_events;

                nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

                uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

                if (copy_from_user(&filter, uaddr, sizeof(filter)))
                        return -EFAULT;

                if (((u32)filter.base_event + filter.nevents) > nr_events ||
                    (filter.action != KVM_PMU_EVENT_ALLOW &&
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;

                mutex_lock(&vcpu->kvm->lock);

                if (!vcpu->kvm->arch.pmu_filter) {
                        vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL);
                        if (!vcpu->kvm->arch.pmu_filter) {
                                mutex_unlock(&vcpu->kvm->lock);
                                return -ENOMEM;
                        }

                        /*
                         * The default depends on the first applied filter.
                         * If it allows events, the default is to deny.
                         * Conversely, if the first filter denies a set of
                         * events, the default is to allow.
                         */
                        if (filter.action == KVM_PMU_EVENT_ALLOW)
                                bitmap_zero(vcpu->kvm->arch.pmu_filter, nr_events);
                        else
                                bitmap_fill(vcpu->kvm->arch.pmu_filter, nr_events);
                }

                if (filter.action == KVM_PMU_EVENT_ALLOW)
                        bitmap_set(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);
                else
                        bitmap_clear(vcpu->kvm->arch.pmu_filter, filter.base_event, filter.nevents);

                mutex_unlock(&vcpu->kvm->lock);

                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!kvm_vcpu_has_pmu(vcpu))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
        case KVM_ARM_VCPU_PMU_V3_FILTER:
                if (kvm_vcpu_has_pmu(vcpu))
                        return 0;
        }

        return -ENXIO;
}