Lines matching full:pmu in arch/x86/kvm/vmx/pmu_intel.c (KVM's Intel vPMU emulation)

 * KVM PMU support for Intel CPUs

#include "pmu.h"

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		...
		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
		...
		__set_bit(INTEL_PMC_IDX_FIXED + i, pmu->pmc_in_use);
	}
	pmu->fixed_ctr_ctrl = data;
}

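Each fixed counter owns a 4-bit control field in MSR_CORE_PERF_FIXED_CTR_CTRL; that is what fixed_ctrl_field() extracts above. A minimal sketch of the helper (KVM keeps an equivalent macro in its pmu.h):

/* Bits 0-1 pick the ring levels to count, bit 2 is AnyThread, bit 3
 * raises a PMI on overflow. */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx) * 4)) & 0xf)
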
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;
	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

In intel_pmc_perf_hw_id():
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);
	...	/* match eventsel/umask, and require the event in CPUID.0xA: */
		    && (pmu->available_event_types & (1 << i)))

In intel_pmc_is_enabled():
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	/* Version 1 PMUs have no IA32_PERF_GLOBAL_CTRL gating counters: */
	if (pmu->version < 2)
		return true;
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);

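intel_pmc_is_enabled() relies on the architectural bit layout of IA32_PERF_GLOBAL_CTRL: GP counter i is gated by bit i, fixed counter i by bit 32 + i. A hypothetical helper, purely for illustration (counter_to_global_ctrl_bit() is not in the file), spells that out:

static inline u64 counter_to_global_ctrl_bit(bool fixed, int i)
{
	/* INTEL_PMC_IDX_FIXED == 32, so fixed counters occupy bits 32+. */
	return 1ull << (fixed ? INTEL_PMC_IDX_FIXED + i : i);
}
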
static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx, MSR_P6_EVNTSEL0);
	...	/* idx = pmc_idx - INTEL_PMC_IDX_FIXED */
	return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
}

In intel_is_valid_rdpmc_ecx():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);

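The fixed flag and idx tested above follow the RDPMC register encoding: ECX bit 30 selects the fixed-counter class and the low bits index within it. The decode used here and by intel_rdpmc_ecx_to_pmc() below amounts to:

	bool fixed = idx & (1u << 30);	/* type field: fixed vs. general-purpose */

	idx &= ~(3u << 30);		/* strip the type bits, keep the index */
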
In intel_rdpmc_ecx_to_pmc():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
	if (fixed) {
		counters = pmu->fixed_counters;
		num_counters = pmu->nr_arch_fixed_counters;
	} else {
		counters = pmu->gp_counters;
		num_counters = pmu->nr_arch_gp_counters;
	}
	...
	*mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
{
	if (!fw_writes_is_enabled(pmu_to_vcpu(pmu)))
		return NULL;
	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
}

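fw_writes_is_enabled() gates the full-width alias MSRs (MSR_IA32_PMC0 and up) on the guest's IA32_PERF_CAPABILITIES. A sketch of that check, assuming PMU_CAP_FW_WRITES names the FW_WRITE capability bit (bit 13):

static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
}
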
In intel_is_valid_msr():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
	/* The four global MSRs exist only on version 2+ PMUs: */
	ret = pmu->version > 1;
	...
	ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
		get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
		get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr);

In intel_msr_idx_to_pmc():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	/* Try fixed counters, then eventsels, then GP counters: */
	pmc = get_fixed_pmc(pmu, msr);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
	pmc = pmc ? pmc : get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

In intel_pmu_get_msr():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		msr_info->data = pmu->fixed_ctr_ctrl;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		msr_info->data = pmu->global_status;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		msr_info->data = pmu->global_ctrl;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		msr_info->data = pmu->global_ovf_ctrl;
	...	/* each case returns 0; other MSRs fall through to: */
	if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
	    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
		u64 val = pmc_read_counter(pmc);
		msr_info->data = val & pmu->counter_bitmask[KVM_PMC_GP];
	} else if ((pmc = get_fixed_pmc(pmu, msr))) {
		u64 val = pmc_read_counter(pmc);
		msr_info->data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
		msr_info->data = pmc->eventsel;
	}

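The counter_bitmask[] values applied above are computed in intel_pmu_refresh() below as ((u64)1 << bit_width) - 1, so a read never exposes bits beyond the advertised counter width. A worked example for a 48-bit GP counter (raw_counter_value stands in for pmc_read_counter()):

	u64 mask = ((u64)1 << 48) - 1;			/* 0x0000ffffffffffff */
	u64 guest_visible = raw_counter_value & mask;	/* bits 63:48 read as zero */
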
In intel_pmu_set_msr():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & pmu->fixed_ctr_ctrl_mask))
			reprogram_fixed_counters(pmu, data);
		...
	case MSR_CORE_PERF_GLOBAL_STATUS:
		pmu->global_status = data;	/* host-initiated writes only */
		...
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (kvm_valid_perf_global_ctrl(pmu, data))
			global_ctrl_changed(pmu, data);
		...
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & pmu->global_ovf_ctrl_mask)) {
			pmu->global_status &= ~data;	/* write-1-to-clear */
			pmu->global_ovf_ctrl = data;
		}
	...
	if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
	    (pmc = get_gp_pmc(pmu, msr, MSR_IA32_PMC0))) {
		if ((msr & MSR_PMC_FULL_WIDTH_BIT) &&
		    (data & ~pmu->counter_bitmask[KVM_PMC_GP]))
			return 1;	/* value wider than the counter */
		...
	} else if ((pmc = get_fixed_pmc(pmu, msr))) {
		...
	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
		if (!(data & pmu->reserved_bits)) {
			...
		}
	}

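In the counter-write arm elided above, a legacy write through MSR_IA32_PERFCTRx carries only 32 significant bits, while the full-width MSR_IA32_PMCx aliases accept the whole counter. A sketch of that handling, assuming MSR_PMC_FULL_WIDTH_BIT is the MSR-index distance between the two ranges:

		if (!msr_info->host_initiated &&
		    !(msr & MSR_PMC_FULL_WIDTH_BIT))
			data = (s64)(s32)data;	/* sign-extend bits 31:0 */
		pmc->counter += data - pmc_read_counter(pmc);
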
In intel_pmu_refresh():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
	/* Start from an empty PMU, then grow it from the guest's CPUID.0xA: */
	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
	pmu->global_ctrl_mask = ~0ull;
	pmu->global_ovf_ctrl_mask = ~0ull;
	pmu->fixed_ctr_ctrl_mask = ~0ull;
	...
	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;
	...
	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 x86_pmu.num_counters_gp);
	...
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	...
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed, ...);
		...
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	/* 0xb = 0b1011: the OS/USR enable bits and the PMI bit of each 4-bit
	 * field become writable; the AnyThread bit (bit 2) stays reserved. */
	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
	pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;
	pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
	...
	pmu->global_ovf_ctrl_mask &=
			~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
	...
	/* On TSX hosts, unreserve the Haswell IN_TX event qualifier bits: */
	pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;

	bitmap_set(pmu->all_valid_pmc_idx,
		0, pmu->nr_arch_gp_counters);
	bitmap_set(pmu->all_valid_pmc_idx,
		INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);

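The eax.split and edx.split accesses above use perf's bitfield decode of CPUID.0xA. For reference, the EAX union as declared in arch/x86/include/asm/perf_event.h:

union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};
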
In intel_pmu_init():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
		pmu->fixed_counters[i].current_config = 0;
	}

In intel_pmu_reset():
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	...
		pmc = &pmu->gp_counters[i];
		pmc_stop_counter(pmc);
	...
		pmc = &pmu->fixed_counters[i];
		pmc_stop_counter(pmc);
	...
	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;