xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/perf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on the x86 implementation.
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/perf_event.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

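/*
 * Static key flipped on at init time when a usable physical PMU is found;
 * the vPMU support code (kvm_arm_support_pmu_v3()) keys off it.
 */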
DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

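/*
 * perf core callback: report whether the PMU interrupt was taken while a
 * vCPU was loaded on this CPU. kvm_get_running_vcpu() returns a non-NULL
 * pointer between vcpu_load() and vcpu_put().
 */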
static int kvm_is_in_guest(void)
{
	return kvm_get_running_vcpu() != NULL;
}

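/*
 * perf core callback: report whether the sampled guest context was
 * unprivileged (EL0). vcpu_mode_priv() decodes the vCPU's saved PSTATE.
 */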
static int kvm_is_user_mode(void)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvm_get_running_vcpu();

	if (vcpu)
		return !vcpu_mode_priv(vcpu);

	return 0;
}

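/*
 * perf core callback: return the guest PC so that a sample taken in guest
 * context is attributed to a guest address rather than a host one.
 */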
static unsigned long kvm_get_guest_ip(void)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvm_get_running_vcpu();

	if (vcpu)
		return *vcpu_pc(vcpu);

	return 0;
}

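/*
 * Callbacks the perf core consults when attributing a PMU sample. On arm64
 * this happens in perf_instruction_pointer()/perf_misc_flags(), roughly:
 *
 *	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
 *		return perf_guest_cbs->get_guest_ip();
 */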
static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.is_in_guest	= kvm_is_in_guest,
	.is_user_mode	= kvm_is_user_mode,
	.get_guest_ip	= kvm_get_guest_ip,
};

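/* Runs once at KVM init time; the host PMU driver has probed by then. */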
int kvm_perf_init(void)
{
	/*
	 * Check if HW_PERF_EVENTS is supported by checking the number of
	 * hardware performance counters. A nonzero count implies both that
	 * a physical PMU is present and that CONFIG_PERF_EVENTS is selected.
	 * Protected KVM does not expose the PMU to guests, so leave the
	 * static key off in that case.
	 */
	if (IS_ENABLED(CONFIG_ARM_PMU) && perf_num_counters() > 0
				       && !is_protected_kvm_enabled())
		static_branch_enable(&kvm_arm_pmu_available);

	return perf_register_guest_info_callbacks(&kvm_guest_cbs);
}

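/* Counterpart of kvm_perf_init(), called when KVM is torn down. */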
int kvm_perf_teardown(void)
{
	return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
}