// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *     1. MSR (named msr): For example, Intel has MSR_IA32_PERFCTRn and AMD
 *        has MSR_K7_PERFCTRn.
 *     2. MSR Index (named idx): This is normally used by the RDPMC
 *        instruction. For instance, the AMD RDPMC instruction uses
 *        0000_0003h in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel
 *        has a similar mechanism, except that it also supports fixed
 *        counters. idx can be used as an index into the gp and fixed
 *        counters.
 *     3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *        code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *        all perf counters (both gp and fixed). The mapping between pmc
 *        and the perf counters is as follows:
 *        * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *                 [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *        * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
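
/*
 * Illustrative sketch (not used by this file): how a guest RDPMC index
 * ("idx" above) maps to a global pmc index on Intel. Bit 30 of ECX
 * selects the fixed-counter space; the low bits select the counter
 * within that space. The real decode lives in the vendor pmu_ops
 * (rdpmc_ecx_to_pmc); this is only a worked example of the NOTE above.
 *
 *	static int example_rdpmc_to_global_idx(unsigned int ecx)
 *	{
 *		bool fixed = ecx & (1u << 30);
 *		unsigned int counter = ecx & ~(3u << 30);
 *
 *		return fixed ? INTEL_PMC_IDX_FIXED + counter : counter;
 *	}
 *
 * e.g. ECX = 4000_0001h names fixed counter 1 (unhalted core cycles),
 * whose global pmc index is INTEL_PMC_IDX_FIXED + 1.
 */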

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on the next guest-mode re-entry.
		 * Otherwise we can't be sure that the vcpu wasn't executing
		 * a hlt instruction at the time of the vmexit, in which case
		 * it won't re-enter guest mode until woken up. So we should
		 * wake it, but that is impossible from NMI context. Do it
		 * from irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  u64 config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = get_sample_period(pmc, pmc->counter);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
			    PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	pmc_to_pmu(pmc)->event_count++;
	clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
}

static void pmc_pause_counter(struct kvm_pmc *pmc)
{
	u64 counter = pmc->counter;

	if (!pmc->perf_event)
		return;

	/* update counter, reset event value to avoid redundant accumulation */
	counter += perf_event_pause(pmc->perf_event, true);
	pmc->counter = counter & pmc_bitmask(pmc);
}

static bool pmc_resume_counter(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event)
		return false;

	/* recalibrate sample period and check if it's accepted by perf core */
	if (perf_event_period(pmc->perf_event,
			      get_sample_period(pmc, pmc->counter)))
		return false;

	/* reuse the perf_event, as pmc_reprogram_counter() would do */
	perf_event_enable(pmc->perf_event);

	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
	return true;
}

static int cmp_u64(const void *pa, const void *pb)
{
	u64 a = *(u64 *)pa;
	u64 b = *(u64 *)pb;

	return (a > b) - (a < b);
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	u64 config;
	u32 type = PERF_TYPE_RAW;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	struct kvm_pmu *pmu = vcpu_to_pmu(pmc->vcpu);
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_pause_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		__u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;

		if (bsearch(&key, filter->events, filter->nevents,
			    sizeof(__u64), cmp_u64))
			allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
		else
			allow_event = filter->action == KVM_PMU_EVENT_DENY;
	}
	if (!allow_event)
		return;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & pmu->raw_event_mask;

	if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = eventsel;
	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

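/*
 * Illustrative sketch (not part of this file): the eventsel value that
 * reprogram_gp_counter() decodes follows the architectural PerfEvtSel
 * layout, e.g. on Intel:
 *
 *	bits  7:0  event select     bit 17  OS (count ring 0)
 *	bits 15:8  unit mask        bit 18  edge detect
 *	bit  16    USR (ring 3)     bit 20  INT (PMI on overflow)
 *	bit  22    enable           bit 23  invert
 *	bits 31:24 counter mask (CMASK)
 *
 * So a guest write of 0x0041_00c0 asks for event 0xc0 (instructions
 * retired) counted in ring 3 only, with the counter enabled; the code
 * above turns that into !exclude_user, exclude_kernel and no PMI.
 */
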
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;
	struct kvm_pmu_event_filter *filter;
	struct kvm *kvm = pmc->vcpu->kvm;

	pmc_pause_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    !test_bit(idx, (ulong *)&filter->fixed_counter_bitmap))
			return;
	}

	if (pmc->current_config == (u64)ctrl && pmc_resume_counter(pmc))
		return;

	pmc_release_perf_event(pmc);

	pmc->current_config = (u64)ctrl;
	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops.pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

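/*
 * Illustrative sketch (not part of this file): the ctrl nibble passed to
 * reprogram_fixed_counter() is the per-counter field of the guest's
 * IA32_FIXED_CTR_CTRL MSR, four bits per fixed counter:
 *
 *	bit 0  enable in ring 0 (OS)     bit 2  any-thread (not used here)
 *	bit 1  enable in ring 3 (USR)    bit 3  PMI on overflow
 *
 * e.g. a guest MSR value of 0x0b0 programs fixed counter 1 (unhalted
 * core cycles) with ctrl = 0xb: count in both rings (en_field = 0x3)
 * and raise a PMI on overflow (pmi = true), while fixed counters 0 and
 * 2 stay disabled.
 */
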
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int bit;

	for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}

	/*
	 * Unused perf_events are only released if the corresponding MSRs
	 * weren't accessed during the last vCPU time slice. kvm_arch_sched_in
	 * triggers KVM_REQ_PMU if cleanup is needed.
	 */
	if (unlikely(pmu->need_cleanup))
		kvm_pmu_cleanup(vcpu);
}

/* check if idx is a valid index to access the PMU */
int kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	return kvm_x86_ops.pmu_ops->is_valid_rdpmc_ecx(vcpu, idx);
}

bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops.pmu_ops->rdpmc_ecx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	if (!(kvm_read_cr4(vcpu) & X86_CR4_PCE) &&
	    (kvm_x86_ops.get_cpl(vcpu) != 0) &&
	    (kvm_read_cr0(vcpu) & X86_CR0_PE))
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

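/*
 * Illustrative note (assumption based on the architectural RDPMC
 * behavior): bit 31 of ECX is the "fast read" flag, which is why the
 * mask in kvm_pmu_rdpmc() truncates the result to 32 bits when it is
 * set. e.g. a guest executing RDPMC with ECX = 8000_0000h reads gp
 * counter 0 but receives only the low 32 bits, while ECX = 0 returns
 * the full counter value (subject to the vendor-specific width mask).
 */
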
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr) ||
		kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, msr);
}

static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = kvm_x86_ops.pmu_ops->msr_idx_to_pmc(vcpu, msr);

	if (pmc)
		__set_bit(pmc->idx, pmu->pmc_in_use);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops.pmu_ops->get_msr(vcpu, msr_info);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
	return kvm_x86_ops.pmu_ops->set_msr(vcpu, msr_info);
}

/* Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as the guest's PMU CPUID being
 * updated), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops.pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops.pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops.pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	pmu->event_count = 0;
	pmu->need_cleanup = false;
	kvm_pmu_refresh(vcpu);
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
			pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

/* Release perf_events for vPMCs that have been unused for a full time slice. */
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc = NULL;
	DECLARE_BITMAP(bitmask, X86_PMC_IDX_MAX);
	int i;

	pmu->need_cleanup = false;

	bitmap_andnot(bitmask, pmu->all_valid_pmc_idx,
		      pmu->pmc_in_use, X86_PMC_IDX_MAX);

	for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
		pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);

		if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
			pmc_stop_counter(pmc);
	}

	bitmap_zero(pmu->pmc_in_use, X86_PMC_IDX_MAX);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.flags != 0)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	/*
	 * Sort the in-kernel list so that we can search it with bsearch.
	 */
	sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);

	mutex_lock(&kvm->lock);
	filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
				     mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}
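
/*
 * Illustrative userspace sketch (assumption: standard KVM uAPI; not
 * part of this file): how a VMM might install an allow-list through the
 * KVM_SET_PMU_EVENT_FILTER ioctl handled above. The event encoding
 * matches the eventsel select/unit-mask bits that the filter compares.
 *
 *	struct kvm_pmu_event_filter *f;
 *	__u64 events[] = { 0xc0 };          // instructions retired
 *
 *	f = calloc(1, sizeof(*f) + sizeof(events));
 *	f->action = KVM_PMU_EVENT_ALLOW;
 *	f->nevents = 1;
 *	memcpy(f->events, events, sizeof(events));
 *	if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f))
 *		err(1, "KVM_SET_PMU_EVENT_FILTER");
 */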