xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/pmu.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_hyp.h>

/*
 * Given the perf event attributes and system type, determine
 * whether we are going to need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2.
	 * When user (EL0) counting is excluded, the exception-level
	 * filters alone separate host and guest, so there is no reason
	 * to switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if attributes are different */
	return (attr->exclude_host != attr->exclude_guest);
}
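
/*
 * Illustrative decision table (editorial sketch, not part of the
 * original source), derived directly from the checks above for a
 * VHE system:
 *
 *	exclude_user	exclude_host	exclude_guest	switch needed?
 *	1		x		x		no
 *	0		0		0		no
 *	0		1		1		no
 *	0		1		0		yes (guest-only event)
 *	0		0		1		yes (host-only event)
 */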

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		ctx->pmu_events.events_host |= set;
	if (!attr->exclude_guest)
		ctx->pmu_events.events_guest |= set;
}
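
/*
 * Usage sketch (assumed caller, for illustration only): the host PMU
 * driver is expected to pass the counter's enable bit when it enables
 * an event, e.g.
 *
 *	kvm_set_pmu_events(BIT(idx), &event->attr);
 *
 * so a guest-only event (exclude_host set) lands in events_guest and
 * a host-only event (exclude_guest set) lands in events_host.
 */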

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!kvm_arm_support_pmu_v3() || !ctx)
		return;

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}
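
/*
 * The matching clear path (again an assumed caller, for illustration):
 * on event disable the driver would undo the set above with
 *
 *	kvm_clr_pmu_events(BIT(idx));
 *
 * which clears the bit from both bitmaps unconditionally.
 */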

#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
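
/*
 * For reference, PMEVTYPER_READ_CASE(3) expands to:
 *
 *	case 3:
 *		return read_sysreg(pmevtyper3_el0)
 *
 * and the WRITE variant to the matching write_sysreg() followed by a
 * break. PMEVTYPER_CASES(READ) therefore emits one such case for each
 * of the 31 event counters.
 */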

/*
 * Read a value directly from PMEVTYPER<idx> where idx is 0-30,
 * or from PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx> where idx is 0-30,
 * or to PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}
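
/*
 * Worked example (illustration): with events == 0x80000005, counters
 * 0 and 2 plus the cycle counter (bit 31, ARMV8_PMU_CYCLE_IDX) are
 * visited, and each has its ARMV8_PMU_EXCLUDE_EL0 filter bit cleared
 * so the event also counts at EL0.
 */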

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	preempt_disable();
	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}
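
/*
 * Pairing sketch (assumed call flow, for illustration): vcpu_load()
 * is expected to end in kvm_vcpu_pmu_restore_guest() above and
 * vcpu_put() in kvm_vcpu_pmu_restore_host() below, so the EL0 filter
 * bits always track whichever world is about to run on this CPU.
 */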

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!kvm_arm_support_pmu_v3() || !has_vhe())
		return;

	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
205*4882a593Smuzhiyun }
206