Lines matching refs: pmu (KVM's arm64 PMU emulation code; each entry gives the source line number, the matching code, and the enclosing function, with "local" marking a local-variable declaration)
51 struct kvm_pmu *pmu; in kvm_pmc_to_vcpu() local
55 pmu = container_of(pmc, struct kvm_pmu, pmc[0]); in kvm_pmc_to_vcpu()
56 vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu); in kvm_pmc_to_vcpu()
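The three matches at source lines 51, 55 and 56 are the body of kvm_pmc_to_vcpu(): starting from one kvm_pmc embedded in the per-vcpu kvm_pmu, successive container_of() steps walk back out to the owning kvm_vcpu. Below is a minimal standalone sketch of that pattern; the struct layouts are simplified stand-ins rather than the kernel's real definitions, and the rebasing of pmc to pmc[0] via its idx field is an assumption about how the helper reaches the start of the array.

```c
/*
 * Standalone sketch of the container_of() chain at lines 51/55/56.
 * The structs and the simplified container_of() below are stand-ins,
 * not the kernel's definitions; "pmc -= pmc->idx" is an assumption.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_pmc { int idx; };

struct kvm_pmu {
	struct kvm_pmc pmc[32];		/* 32 architectural counters */
};

struct kvm_vcpu_arch { struct kvm_pmu pmu; };
struct kvm_vcpu { int vcpu_id; struct kvm_vcpu_arch arch; };

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;	/* rebase to pmc[0] of the owning array */
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);	/* line 55 */
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu); /* line 56 */
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

int main(void)
{
	struct kvm_vcpu vcpu = { .vcpu_id = 3 };

	for (int i = 0; i < 32; i++)
		vcpu.arch.pmu.pmc[i].idx = i;

	/* Walking back from any counter lands on the same vcpu. */
	printf("%d\n", kvm_pmc_to_vcpu(&vcpu.arch.pmu.pmc[7])->vcpu_id);
	return 0;
}
```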
68 return test_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_pmc_is_chained()
166 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_get_counter_value() local
167 struct kvm_pmc *pmc = &pmu->pmc[select_idx]; in kvm_pmu_get_counter_value()
252 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_init() local
255 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
266 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_reset() local
270 kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]); in kvm_pmu_vcpu_reset()
272 bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS); in kvm_pmu_vcpu_reset()
283 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_vcpu_destroy() local
286 kvm_pmu_release_perf_event(&pmu->pmc[i]); in kvm_pmu_vcpu_destroy()
287 irq_work_sync(&vcpu->arch.pmu.overflow_work); in kvm_pmu_vcpu_destroy()
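Source lines 252-287 cover the per-vcpu lifecycle: init stamps each counter with its own index, reset stops the counters and clears the chained-pair bitmap, and destroy releases the backing perf events and waits for any pending overflow irq_work. A kernel-context sketch of those three loops, built around the fragments above; the function signatures and the plain 0..ARMV8_PMU_MAX_COUNTERS iteration (especially in reset) are assumptions, and this is not standalone code.

```c
/* Sketch only: reconstructed around the matches at lines 252-287. */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;		/* line 252 */
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;			/* line 255 */
}

void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;		/* line 266 */
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);	/* line 270 */

	/* one bit per even/odd counter pair, cleared on reset (line 272) */
	bitmap_zero(vcpu->arch.pmu.chained, ARMV8_PMU_MAX_COUNTER_PAIRS);
}

void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;		/* line 283 */
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);	/* line 286 */
	irq_work_sync(&vcpu->arch.pmu.overflow_work);		/* line 287 */
}
```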
311 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_enable_counter_mask() local
321 pmc = &pmu->pmc[i]; in kvm_pmu_enable_counter_mask()
346 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_disable_counter_mask() local
356 pmc = &pmu->pmc[i]; in kvm_pmu_disable_counter_mask()
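Lines 311-356 are the twin enable/disable paths; both resolve each set bit of the guest-supplied counter mask to the corresponding pmu->pmc[i]. A kernel-context sketch of that loop shape; only the pmu and pmc[i] lookups appear in the listing, so the mask test and everything inside the loop body are assumptions.

```c
/* Sketch only: the BIT(i) test and the loop body are assumed; the
 * listing shows only the lookups at lines 311/321 and 346/356. */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;		/* line 311 */
	struct kvm_pmc *pmc;
	int i;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;			/* counter i not selected */

		pmc = &pmu->pmc[i];			/* line 321 */
		/* ...(re)start or create the perf event backing pmc... */
	}
}
```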
384 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_state() local
391 if (pmu->irq_level == overflow) in kvm_pmu_update_state()
394 pmu->irq_level = overflow; in kvm_pmu_update_state()
398 pmu->irq_num, overflow, pmu); in kvm_pmu_update_state()
405 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_should_notify_user() local
412 return pmu->irq_level != run_level; in kvm_pmu_should_notify_user()
424 if (vcpu->arch.pmu.irq_level) in kvm_pmu_update_run()
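Lines 384-424 are the interrupt plumbing: kvm_pmu_update_state() caches the overflow level in pmu->irq_level and only acts when it changes, kvm_pmu_should_notify_user() compares that cached level against what was last reported to userspace, and kvm_pmu_update_run() mirrors the level into the run structure. A standalone sketch of that edge-detection idea; pmu_state, update_state(), should_notify_user() and main() below are illustrative stand-ins, not kernel identifiers.

```c
/*
 * Standalone sketch of the level-caching pattern behind lines 384-424.
 * Only the "compare the cached level, act on change" idea comes from
 * the listing above; all names here are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct pmu_state {
	bool irq_level;		/* level KVM thinks the PMU interrupt is at */
	bool user_level;	/* level last reported to userspace */
};

/* kvm_pmu_update_state(): do nothing unless the computed level changed. */
static void update_state(struct pmu_state *s, bool overflow)
{
	if (s->irq_level == overflow)
		return;			/* line 391: no edge, nothing to do */

	s->irq_level = overflow;	/* line 394 */
	printf("inject level %d into the (emulated) interrupt controller\n",
	       overflow);
}

/* kvm_pmu_should_notify_user(): userspace is stale iff the levels differ. */
static bool should_notify_user(const struct pmu_state *s)
{
	return s->irq_level != s->user_level;	/* line 412 */
}

int main(void)
{
	struct pmu_state s = { false, false };

	update_state(&s, true);		/* 0 -> 1: propagated */
	update_state(&s, true);		/* still 1: suppressed */

	if (should_notify_user(&s))
		s.user_level = s.irq_level;	/* kvm_pmu_update_run() */

	update_state(&s, false);	/* 1 -> 0: propagated */
	return 0;
}
```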
460 struct kvm_pmu *pmu; in kvm_pmu_perf_overflow_notify_vcpu() local
462 pmu = container_of(work, struct kvm_pmu, overflow_work); in kvm_pmu_perf_overflow_notify_vcpu()
463 vcpu = kvm_pmc_to_vcpu(pmu->pmc); in kvm_pmu_perf_overflow_notify_vcpu()
476 struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu); in kvm_pmu_perf_overflow()
481 cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE); in kvm_pmu_perf_overflow()
504 irq_work_queue(&vcpu->arch.pmu.overflow_work); in kvm_pmu_perf_overflow()
507 cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD); in kvm_pmu_perf_overflow()
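Lines 460-507 are the overflow path: the host perf overflow handler stops the backing event, records the virtual overflow, queues pmu->overflow_work so the vcpu can be kicked from a safe context, then restarts the event; the irq_work callback recovers the vcpu via container_of() and kvm_pmc_to_vcpu(). A kernel-context sketch of that skeleton; the overflow_handler_context lookup and the kvm_vcpu_kick() call are assumptions, and the bookkeeping between stop and start (other than the irq_work_queue() call) is elided.

```c
/* Sketch only, reconstructed around the matches at lines 460-507. */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_pmu *pmu = container_of(work, struct kvm_pmu,
					   overflow_work);	/* line 462 */

	/* pmu->pmc is &pmc[0], which is enough for kvm_pmc_to_vcpu() (line 463). */
	kvm_vcpu_kick(kvm_pmc_to_vcpu(pmu->pmc));
}

static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context; /* assumed */
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);	/* line 476 */

	/* Pause the host event while the virtual overflow is recorded. */
	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);		/* line 481 */

	/* ...mark the overflow in the guest's view of the PMU... */

	/* Defer the vcpu kick to irq_work context (line 504). */
	irq_work_queue(&vcpu->arch.pmu.overflow_work);

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);		/* line 507 */
}
```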
517 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_software_increment() local
546 if (kvm_pmu_pmc_is_chained(&pmu->pmc[i])) { in kvm_pmu_software_increment()
600 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_create_perf_event() local
611 pmc = kvm_pmu_get_canonical_pmc(&pmu->pmc[select_idx]); in kvm_pmu_create_perf_event()
690 struct kvm_pmu *pmu = &vcpu->arch.pmu; in kvm_pmu_update_pmc_chained() local
691 struct kvm_pmc *pmc = &pmu->pmc[select_idx], *canonical_pmc; in kvm_pmu_update_pmc_chained()
709 set_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
712 clear_bit(pmc->idx >> 1, vcpu->arch.pmu.chained); in kvm_pmu_update_pmc_chained()
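The idx >> 1 that appears at source lines 68, 709 and 712 is a pair index: an event can be chained across an even/odd counter pair, so the PMU keeps one bit per pair in a bitmap of ARMV8_PMU_MAX_COUNTER_PAIRS entries (cleared wholesale at line 272). A standalone sketch of that bookkeeping, using simplified userspace stand-ins for the kernel's bitmap helpers:

```c
/*
 * Standalone sketch of the chained-pair bitmap used at lines 68, 272,
 * 709 and 712.  The helpers below are simplified userspace stand-ins
 * for the kernel's test_bit()/set_bit()/clear_bit().
 */
#include <stdbool.h>
#include <stdio.h>

#define ARMV8_PMU_MAX_COUNTERS		32
#define ARMV8_PMU_MAX_COUNTER_PAIRS	(ARMV8_PMU_MAX_COUNTERS >> 1)

static unsigned long chained;	/* one bit per even/odd counter pair */

static void set_bit(int nr, unsigned long *map)   { *map |=  (1UL << nr); }
static void clear_bit(int nr, unsigned long *map) { *map &= ~(1UL << nr); }
static bool test_bit(int nr, const unsigned long *map)
{
	return *map & (1UL << nr);
}

/* Counters 2n and 2n+1 share bit n: both report chained together. */
static bool pmc_is_chained(int idx)
{
	return test_bit(idx >> 1, &chained);	/* line 68 */
}

int main(void)
{
	set_bit(6 >> 1, &chained);	/* chain the pair {6, 7} (line 709) */

	printf("pmc6 chained: %d\n", pmc_is_chained(6));	/* 1 */
	printf("pmc7 chained: %d\n", pmc_is_chained(7));	/* 1 */
	printf("pmc5 chained: %d\n", pmc_is_chained(5));	/* 0 */

	clear_bit(6 >> 1, &chained);	/* unchain the pair (line 712) */
	return 0;
}
```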
747 struct arm_pmu *pmu; in kvm_pmu_probe_pmuver() local
776 if (event->pmu) { in kvm_pmu_probe_pmuver()
777 pmu = to_arm_pmu(event->pmu); in kvm_pmu_probe_pmuver()
778 if (pmu->pmuver) in kvm_pmu_probe_pmuver()
779 pmuver = pmu->pmuver; in kvm_pmu_probe_pmuver()
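Lines 747-779 are kvm_pmu_probe_pmuver(): create a short-lived kernel counter and, if it landed on an arm_pmu, read the PMU version out of it via to_arm_pmu() (itself a container_of() back to the embedding arm_pmu). A kernel-context sketch of that probe; the attr setup, the fallback value and the teardown are assumptions about the surrounding code.

```c
/* Sketch only, around the matches at lines 747-779. */
static int kvm_pmu_probe_pmuver(void)
{
	struct perf_event_attr attr = { };
	struct perf_event *event;
	struct arm_pmu *pmu;			/* line 747 */
	int pmuver = 0xf;			/* assumed IMP-DEF fallback */

	/* ...fill attr with a harmless, never-running counting event... */

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 NULL, NULL);
	if (IS_ERR(event))
		return pmuver;

	if (event->pmu) {				/* line 776 */
		/* to_arm_pmu() is container_of() back to the arm_pmu. */
		pmu = to_arm_pmu(event->pmu);		/* line 777 */
		if (pmu->pmuver)			/* line 778 */
			pmuver = pmu->pmuver;		/* line 779 */
	}

	perf_event_release_kernel(event);
	return pmuver;
}
```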
832 if (!vcpu->arch.pmu.created) in kvm_arm_pmu_v3_enable()
841 int irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_enable()
873 ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num, in kvm_arm_pmu_v3_init()
874 &vcpu->arch.pmu); in kvm_arm_pmu_v3_init()
879 init_irq_work(&vcpu->arch.pmu.overflow_work, in kvm_arm_pmu_v3_init()
882 vcpu->arch.pmu.created = true; in kvm_arm_pmu_v3_init()
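Lines 832-882 are the enable/init pair; the init tail claims the PMU interrupt for the PMU itself, sets up the overflow irq_work, and marks the PMU as created so later attribute writes are refused (line 917). A kernel-context sketch of that tail; the irqchip_in_kernel() guard and the earlier validity checks are assumptions.

```c
/* Sketch only, around the matches at lines 873-882; earlier checks elided. */
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	int ret = 0;

	if (irqchip_in_kernel(vcpu->kvm)) {
		/* Claim the PMU interrupt so nothing else can inject it. */
		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);	/* lines 873-874 */
		if (ret)
			return ret;
	}

	/* Deferred vcpu kick used by the perf overflow handler (line 879). */
	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;				/* line 882 */
	return 0;
}
```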
901 if (vcpu->arch.pmu.irq_num != irq) in pmu_irq_is_valid()
904 if (vcpu->arch.pmu.irq_num == irq) in pmu_irq_is_valid()
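The two comparisons at lines 901 and 904 are the two halves of pmu_irq_is_valid(): a per-cpu interrupt (PPI) must be programmed identically on every vcpu that has one, while a shared interrupt (SPI) must not already be claimed by another vcpu. A kernel-context sketch of that check; the vcpu walk and the irq_is_ppi()/initialized tests are assumptions consistent with the two comparisons shown.

```c
/* Sketch only: the vcpu walk and PPI/SPI split are assumed around the
 * two comparisons visible at lines 901 and 904. */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			/* Per-cpu interrupt: every vcpu must use the same one. */
			if (vcpu->arch.pmu.irq_num != irq)	/* line 901 */
				return false;
		} else {
			/* Shared interrupt: no other vcpu may already own it. */
			if (vcpu->arch.pmu.irq_num == irq)	/* line 904 */
				return false;
		}
	}

	return true;
}
```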
917 if (vcpu->arch.pmu.created) in kvm_arm_pmu_v3_set_attr()
948 vcpu->arch.pmu.irq_num = irq; in kvm_arm_pmu_v3_set_attr()
1021 irq = vcpu->arch.pmu.irq_num; in kvm_arm_pmu_v3_get_attr()