/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC                          32
#define INTEL_PMC_MAX_FIXED                             4
#define INTEL_PMC_IDX_FIXED                            32

#define X86_PMC_IDX_MAX                                64

#define MSR_ARCH_PERFMON_PERFCTR0                     0xc1
#define MSR_ARCH_PERFMON_PERFCTR1                     0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0                   0x186
#define MSR_ARCH_PERFMON_EVENTSEL1                   0x187

#define ARCH_PERFMON_EVENTSEL_EVENT             0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK             0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR               (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS                (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE              (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL       (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT               (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY               (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE            (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV               (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK             0xFF000000ULL
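
/*
 * Illustrative composition example (not a definition from this file):
 * the architectural "LLC Misses" event is event-code 0x2e, umask 0x41.
 * Counting it in both user and kernel mode means programming:
 *
 *	0x2e | (0x41 << 8) | ARCH_PERFMON_EVENTSEL_USR |
 *	ARCH_PERFMON_EVENTSEL_OS | ARCH_PERFMON_EVENTSEL_ENABLE == 0x43412e
 */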

#define HSW_IN_TX                               (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED                  (1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE                   (1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE                    (1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE          (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY                (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY                 (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT       37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK        \
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT                    48
#define AMD64_L3_SLICE_MASK                     \
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK                   \
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT                   56
#define AMD64_L3_THREAD_MASK                    \
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK               \
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES                   BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES                  BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT                   42
#define AMD64_L3_COREID_MASK                    \
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK              \
	(ARCH_PERFMON_EVENTSEL_EVENT |  \
	 ARCH_PERFMON_EVENTSEL_UMASK |  \
	 ARCH_PERFMON_EVENTSEL_EDGE  |  \
	 ARCH_PERFMON_EVENTSEL_INV   |  \
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS                     \
	(ARCH_PERFMON_EVENTSEL_EDGE |           \
	 ARCH_PERFMON_EVENTSEL_INV |            \
	 ARCH_PERFMON_EVENTSEL_CMASK |          \
	 ARCH_PERFMON_EVENTSEL_ANY |            \
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |    \
	 HSW_IN_TX |                            \
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK            \
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB         \
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS                      4
#define AMD64_NUM_COUNTERS_CORE                 6
#define AMD64_NUM_COUNTERS_NB                   4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL   0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
	(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED      6
#define ARCH_PERFMON_EVENTS_COUNT               7

#define PEBS_DATACFG_MEMINFO    BIT_ULL(0)
#define PEBS_DATACFG_GP         BIT_ULL(1)
#define PEBS_DATACFG_XMMS       BIT_ULL(2)
#define PEBS_DATACFG_LBRS       BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT  24
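
/*
 * Sketch of intended use (assumed, based on the adaptive-PEBS scheme):
 * when LBR entries are requested, the number of entries to record,
 * minus one, goes in the field at PEBS_DATACFG_LBR_SHIFT, roughly:
 *
 *	data_cfg |= PEBS_DATACFG_LBRS | ((num_lbr - 1) << PEBS_DATACFG_LBR_SHIFT);
 */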

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
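
/*
 * Worked example with made-up values: if CPUID leaf 0xA returned
 * EAX == 0x07300805, the union above would decode it as version_id == 5,
 * num_counters == 8, bit_width == 48 and mask_length == 7.
 */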

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int	lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int	lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int	lbr_call_stack:1;
	} split;
	unsigned int		full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int	lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int	lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int	lbr_br_type:1;
	} split;
	unsigned int		full;
};

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE      (1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS   (1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC, which has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode PMC,
 * e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC, which doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC must be 0x00.
 * The pseudo umask-code is 0xX. The X equals the index of the fixed
 * counter + 1, e.g., the fixed counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0             0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS        (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1             0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES          (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2             0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES          (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES          (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3             0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS               (INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS               (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
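
/*
 * Illustrative only: the pseudo-encoding rule above amounts to a
 * hypothetical helper like
 *
 *	#define INTEL_FIXED_PSEUDO_ENC(idx)	(((idx) + 1) << 8)
 *
 * where fixed counter 2 yields 0x0300 (CPU_CLK_Unhalted.Ref) and fixed
 * counter 3 yields 0x0400, which matches INTEL_TD_SLOTS below.
 */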

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS         (INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE       (INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING       (INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC       (INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND       (INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND       (INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_METRIC_END        INTEL_PMC_IDX_TD_BE_BOUND
#define INTEL_PMC_MSK_TOPDOWN           ((0xfull << INTEL_PMC_IDX_METRIC_BASE) | \
					 INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS                  0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING        0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC        0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND        0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND        0x8300	/* BE bound metric */
#define INTEL_TD_METRIC_MAX             INTEL_TD_METRIC_BE_BOUND
#define INTEL_TD_METRIC_NUM             4

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
	(~(0x1ull << (bit)) & INTEL_PMC_MSK_TOPDOWN)
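
/*
 * Illustrative only: since INTEL_PMC_IDX_TD_RETIRING is bit 48, the
 * expression INTEL_PMC_OTHER_TOPDOWN_BITS(INTEL_PMC_IDX_TD_RETIRING)
 * keeps the remaining three metric bits plus the SLOTS bit, i.e. all of
 * INTEL_PMC_MSK_TOPDOWN except the caller's own bit.
 */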

#define GLOBAL_STATUS_COND_CHG                  BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT            62
#define GLOBAL_STATUS_BUFFER_OVF                BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF                   BIT_ULL(61)
#define GLOBAL_STATUS_ASIF                      BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN           BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT           58
#define GLOBAL_STATUS_LBRS_FROZEN               BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT         55
#define GLOBAL_STATUS_TRACE_TOPAPMI             BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT      48

#define GLOBAL_CTRL_EN_PERF_METRICS             48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate the LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it'll never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26; per the
 * fixed-counter pseudo-umask rule (index + 1), 26 + 1 == 0x1b.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES              0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL                  (1U<<0)
#define IBS_CAPS_FETCHSAM               (1U<<1)
#define IBS_CAPS_OPSAM                  (1U<<2)
#define IBS_CAPS_RDWROPCNT              (1U<<3)
#define IBS_CAPS_OPCNT                  (1U<<4)
#define IBS_CAPS_BRNTRGT                (1U<<5)
#define IBS_CAPS_OPCNTEXT               (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK          (1U<<7)
#define IBS_CAPS_OPBRNFUSE              (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD           (1U<<9)
#define IBS_CAPS_OPDATA4                (1U<<10)

#define IBS_CAPS_DEFAULT                (IBS_CAPS_AVAIL         \
					 | IBS_CAPS_FETCHSAM    \
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL                          0x1cc
#define IBSCTL_LVT_OFFSET_VALID         (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK          0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN       (1ULL<<57)
#define IBS_FETCH_VAL           (1ULL<<49)
#define IBS_FETCH_ENABLE        (1ULL<<48)
#define IBS_FETCH_CNT           0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT       0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT          (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND     (0x0007FULL<<32)
#define IBS_OP_CNT_CTL          (1ULL<<19)
#define IBS_OP_VAL              (1ULL<<18)
#define IBS_OP_ENABLE           (1ULL<<17)
#define IBS_OP_MAX_CNT          0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT      0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK (0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID         (1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif
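
/*
 * Illustrative usage sketch (not defined here): callers typically gate
 * optional IBS features on these capability bits, e.g.:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (caps & IBS_CAPS_OPCNT)
 *		...dispatched-op counting is available...
 */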

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)	{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	return -1;
}
#endif

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on) { }
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */