// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>

#include <asm/cpu_entry_area.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/insn.h>
#include <asm/io.h>

#include "../perf_event.h"

/* Waste a full page so it can be mapped into the cpu_entry_area */
DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

#define PEBS_FIXUP_SIZE		PAGE_SIZE
/*
 * pebs_record_32 for p4 and core not supported

struct pebs_record_32 {
	u32 flags, ip;
	u32 ax, bx, cx, dx;
	u32 si, di, bp, sp;
};

 */

union intel_x86_pebs_dse {
	u64 val;
	struct {
		unsigned int ld_dse:4;
		unsigned int ld_stlb_miss:1;
		unsigned int ld_locked:1;
		unsigned int ld_reserved:26;
	};
	struct {
		unsigned int st_l1d_hit:1;
		unsigned int st_reserved1:3;
		unsigned int st_stlb_miss:1;
		unsigned int st_locked:1;
		unsigned int st_reserved2:26;
	};
};


/*
 * Map PEBS Load Latency Data Source encodings to generic
 * memory data source information
 */
#define P(a, b) PERF_MEM_S(a, b)
#define OP_LH (P(OP, LOAD) | P(LVL, HIT))
#define LEVEL(x) P(LVLNUM, x)
#define REM P(REMOTE, REMOTE)
#define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))

/* Version for Sandy Bridge and later */
static u64 pebs_data_source[] = {
	P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),       /* 0x00: unknown L3 */
	OP_LH | P(LVL, L1)  | LEVEL(L1)  | P(SNOOP, NONE),           /* 0x01: L1 local */
	OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE),           /* 0x02: LFB hit */
	OP_LH | P(LVL, L2)  | LEVEL(L2)  | P(SNOOP, NONE),           /* 0x03: L2 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, NONE),           /* 0x04: L3 hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, MISS),           /* 0x05: L3 hit, snoop miss */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HIT),            /* 0x06: L3 hit, snoop hit */
	OP_LH | P(LVL, L3)  | LEVEL(L3)  | P(SNOOP, HITM),           /* 0x07: L3 hit, snoop hitm */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x08: L3 miss, snoop hit */
	OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss, snoop hitm */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | P(SNOOP, HIT),       /* 0x0a: L3 miss, shared */
	OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT),  /* 0x0b: L3 miss, shared */
	OP_LH | P(LVL, LOC_RAM)  | LEVEL(RAM) | SNOOP_NONE_MISS,     /* 0x0c: L3 miss, excl */
	OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
	OP_LH | P(LVL, IO)  | LEVEL(NA) | P(SNOOP, NONE),            /* 0x0e: I/O */
	OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE),            /* 0x0f: uncached */
};
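
/*
 * The low four bits of the PEBS data-source field (ld_dse above) index
 * this table; e.g. dse = 0x07 maps to "L3 hit, snoop hitm". Bits 4 and
 * 5 (STLB miss, locked) are folded in separately by load_latency_data().
 */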

/* Patch up minor differences in the bits */
void __init intel_pmu_pebs_data_source_nhm(void)
{
	pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
	pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
	pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
}

void __init intel_pmu_pebs_data_source_skl(bool pmem)
{
	u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);

	pebs_data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
	pebs_data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
	pebs_data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
	pebs_data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
	pebs_data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
}

static u64 precise_store_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);

	dse.val = status;

	/*
	 * bit 4: TLB access
	 * 1 = store missed 2nd level TLB
	 *
	 * so it was either satisfied by the page walker or the OS,
	 * otherwise it hit the 2nd level TLB
	 */
	if (dse.st_stlb_miss)
		val |= P(TLB, MISS);
	else
		val |= P(TLB, HIT);

	/*
	 * bit 0: hit L1 data cache
	 * if not set, then all we know is that
	 * it missed L1D
	 */
	if (dse.st_l1d_hit)
		val |= P(LVL, HIT);
	else
		val |= P(LVL, MISS);

	/*
	 * bit 5: Locked prefix
	 */
	if (dse.st_locked)
		val |= P(LOCK, LOCKED);

	return val;
}

static u64 precise_datala_hsw(struct perf_event *event, u64 status)
{
	union perf_mem_data_src dse;

	dse.val = PERF_MEM_NA;

	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
		dse.mem_op = PERF_MEM_OP_STORE;
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
		dse.mem_op = PERF_MEM_OP_LOAD;

	/*
	 * L1 info is only valid for the following events:
	 *
	 * MEM_UOPS_RETIRED.STLB_MISS_STORES
	 * MEM_UOPS_RETIRED.LOCK_STORES
	 * MEM_UOPS_RETIRED.SPLIT_STORES
	 * MEM_UOPS_RETIRED.ALL_STORES
	 */
	if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
		if (status & 1)
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
		else
			dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
	}
	return dse.val;
}

static u64 load_latency_data(u64 status)
{
	union intel_x86_pebs_dse dse;
	u64 val;

	dse.val = status;

	/*
	 * use the mapping table for bits 0-3
	 */
	val = pebs_data_source[dse.ld_dse];

	/*
	 * Nehalem models do not provide TLB or lock information
	 */
	if (x86_pmu.pebs_no_tlb) {
		val |= P(TLB, NA) | P(LOCK, NA);
		return val;
	}
	/*
	 * bit 4: TLB access
	 * 0 = did not miss 2nd level TLB
	 * 1 = missed 2nd level TLB
	 */
	if (dse.ld_stlb_miss)
		val |= P(TLB, MISS) | P(TLB, L2);
	else
		val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);

	/*
	 * bit 5: locked prefix
	 */
	if (dse.ld_locked)
		val |= P(LOCK, LOCKED);

	return val;
}
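
/*
 * Worked example (a sketch based on the bit layout above): status = 0x24
 * is 0b100100, so ld_dse = 0x4 (L3 hit, no snoop needed), ld_stlb_miss = 0
 * and ld_locked = 1. load_latency_data() then reports a locked load that
 * hit the L3 and the TLB hierarchy.
 */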

struct pebs_record_core {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
};

struct pebs_record_nhm {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
};

/*
 * Same as pebs_record_nhm, with two additional fields.
 */
struct pebs_record_hsw {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
};

union hsw_tsx_tuning {
	struct {
		u32 cycles_last_block     : 32,
		    hle_abort             : 1,
		    rtm_abort             : 1,
		    instruction_abort     : 1,
		    non_instruction_abort : 1,
		    retry                 : 1,
		    data_conflict         : 1,
		    capacity_writes       : 1,
		    capacity_reads        : 1;
	};
	u64 value;
};

#define PEBS_HSW_TSX_FLAGS	0xff00000000ULL
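
/*
 * PEBS_HSW_TSX_FLAGS selects bits 32-39 of the tsx_tuning field, i.e.
 * the abort-reason flags that sit above the 32-bit cycles_last_block
 * counter in the union above.
 */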

/* Same as HSW, plus TSC */

struct pebs_record_skl {
	u64 flags, ip;
	u64 ax, bx, cx, dx;
	u64 si, di, bp, sp;
	u64 r8,  r9,  r10, r11;
	u64 r12, r13, r14, r15;
	u64 status, dla, dse, lat;
	u64 real_ip, tsx_tuning;
	u64 tsc;
};
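
/*
 * For reference, the resulting record sizes are: pebs_record_core 144
 * bytes, pebs_record_nhm 176, pebs_record_hsw 192 and pebs_record_skl
 * 200 -- each format appends to the previous one.
 */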

void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static DEFINE_PER_CPU(void *, insn_buffer);

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void ds_clear_cea(void *cea, size_t size)
{
	unsigned long start = (unsigned long)cea;
	size_t msz = 0;

	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);

	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}

static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
{
	unsigned int order = get_order(size);
	int node = cpu_to_node(cpu);
	struct page *page;

	page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
	return page ? page_address(page) : NULL;
}
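
/*
 * Note that get_order() rounds the request up to a power-of-two number
 * of pages: e.g. a 24 KiB buffer request allocates order 3, i.e. 32 KiB,
 * on a system with 4 KiB pages.
 */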

static void dsfree_pages(const void *buffer, size_t size)
{
	if (buffer)
		free_pages((unsigned long)buffer, get_order(size));
}

static int alloc_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	size_t bsiz = x86_pmu.pebs_buffer_size;
	int max, node = cpu_to_node(cpu);
	void *buffer, *insn_buff, *cea;

	if (!x86_pmu.pebs)
		return 0;

	buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
	if (unlikely(!buffer))
		return -ENOMEM;

	/*
	 * HSW and later provide the eventing IP directly, so the fixup
	 * buffer is not needed there.
	 */
	if (x86_pmu.intel_cap.pebs_format < 2) {
		insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
		if (!insn_buff) {
			dsfree_pages(buffer, bsiz);
			return -ENOMEM;
		}
		per_cpu(insn_buffer, cpu) = insn_buff;
	}
	hwev->ds_pebs_vaddr = buffer;
	/* Update the cpu entry area mapping */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds->pebs_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
	ds->pebs_index = ds->pebs_buffer_base;
	max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
	ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
	return 0;
}

static void release_pebs_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.pebs)
		return;

	kfree(per_cpu(insn_buffer, cpu));
	per_cpu(insn_buffer, cpu) = NULL;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
	ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
	dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
	hwev->ds_pebs_vaddr = NULL;
}

static int alloc_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	struct debug_store *ds = hwev->ds;
	void *buffer, *cea;
	int max;

	if (!x86_pmu.bts)
		return 0;

	buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
	if (unlikely(!buffer)) {
		WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
		return -ENOMEM;
	}
	hwev->ds_bts_vaddr = buffer;
	/* Update the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds->bts_buffer_base = (unsigned long) cea;
	ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
	ds->bts_index = ds->bts_buffer_base;
	max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
	ds->bts_absolute_maximum = ds->bts_buffer_base +
				   max * BTS_RECORD_SIZE;
	ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
				      (max / 16) * BTS_RECORD_SIZE;
	return 0;
}
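
/*
 * The interrupt threshold above sits one sixteenth of the buffer below
 * its end, so the BTS interrupt fires once 15/16 of the records are in
 * place and the hardware still has room to write before wrapping.
 */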

static void release_bts_buffer(int cpu)
{
	struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
	void *cea;

	if (!x86_pmu.bts)
		return;

	/* Clear the fixmap */
	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
	ds_clear_cea(cea, BTS_BUFFER_SIZE);
	dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
	hwev->ds_bts_vaddr = NULL;
}

static int alloc_ds_buffer(int cpu)
{
	struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;

	memset(ds, 0, sizeof(*ds));
	per_cpu(cpu_hw_events, cpu).ds = ds;
	return 0;
}

static void release_ds_buffer(int cpu)
{
	per_cpu(cpu_hw_events, cpu).ds = NULL;
}

void release_ds_buffers(void)
{
	int cpu;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	for_each_possible_cpu(cpu)
		release_ds_buffer(cpu);

	for_each_possible_cpu(cpu) {
		/*
		 * Again, ignore errors from offline CPUs; they will no longer
		 * observe cpu_hw_events.ds and will not program the DS_AREA
		 * when they come up.
		 */
		fini_debug_store_on_cpu(cpu);
	}

	for_each_possible_cpu(cpu) {
		release_pebs_buffer(cpu);
		release_bts_buffer(cpu);
	}
}

void reserve_ds_buffers(void)
{
	int bts_err = 0, pebs_err = 0;
	int cpu;

	x86_pmu.bts_active = 0;
	x86_pmu.pebs_active = 0;

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	if (!x86_pmu.bts)
		bts_err = 1;

	if (!x86_pmu.pebs)
		pebs_err = 1;

	for_each_possible_cpu(cpu) {
		if (alloc_ds_buffer(cpu)) {
			bts_err = 1;
			pebs_err = 1;
		}

		if (!bts_err && alloc_bts_buffer(cpu))
			bts_err = 1;

		if (!pebs_err && alloc_pebs_buffer(cpu))
			pebs_err = 1;

		if (bts_err && pebs_err)
			break;
	}

	if (bts_err) {
		for_each_possible_cpu(cpu)
			release_bts_buffer(cpu);
	}

	if (pebs_err) {
		for_each_possible_cpu(cpu)
			release_pebs_buffer(cpu);
	}

	if (bts_err && pebs_err) {
		for_each_possible_cpu(cpu)
			release_ds_buffer(cpu);
	} else {
		if (x86_pmu.bts && !bts_err)
			x86_pmu.bts_active = 1;

		if (x86_pmu.pebs && !pebs_err)
			x86_pmu.pebs_active = 1;

		for_each_possible_cpu(cpu) {
			/*
			 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
			 * will get this call through intel_pmu_cpu_starting()
			 * when they come online.
			 */
			init_debug_store_on_cpu(cpu);
		}
	}
}

/*
 * BTS
 */

struct event_constraint bts_constraint =
	EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);

void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= DEBUGCTLMSR_TR;
	debugctlmsr |= DEBUGCTLMSR_BTS;
	if (config & ARCH_PERFMON_EVENTSEL_INT)
		debugctlmsr |= DEBUGCTLMSR_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
		  DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

int intel_pmu_drain_bts_buffer(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct bts_record {
		u64 from;
		u64 to;
		u64 flags;
	};
	struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
	struct bts_record *at, *base, *top;
	struct perf_output_handle handle;
	struct perf_event_header header;
	struct perf_sample_data data;
	unsigned long skip = 0;
	struct pt_regs regs;

	if (!event)
		return 0;

	if (!x86_pmu.bts_active)
		return 0;

	base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
	top  = (struct bts_record *)(unsigned long)ds->bts_index;

	if (top <= base)
		return 0;

	memset(&regs, 0, sizeof(regs));

	ds->bts_index = ds->bts_buffer_base;

	perf_sample_data_init(&data, 0, event->hw.last_period);

	/*
	 * BTS leaks kernel addresses in branches across the cpl boundary,
	 * such as traps or system calls, so unless the user is asking for
	 * kernel tracing (and right now it's not possible), we'd need to
	 * filter them out. But first we need to count how many of those we
	 * have in the current batch. This is an extra O(n) pass, however,
	 * it is much cheaper than the output pass below, especially since
	 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see
	 * alloc_bts_buffer()).
	 */
	for (at = base; at < top; at++) {
		/*
		 * Note that right now *this* BTS code only works if
		 * attr::exclude_kernel is set, but let's keep this extra
		 * check here in case that changes.
		 */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			skip++;
	}

	/*
	 * Prepare a generic sample, i.e. fill in the invariant fields.
	 * We will overwrite the from and to address before we output
	 * the sample.
	 */
	rcu_read_lock();
	perf_prepare_sample(&header, &data, event, &regs);

	if (perf_output_begin(&handle, &data, event,
			      header.size * (top - base - skip)))
		goto unlock;

	for (at = base; at < top; at++) {
		/* Filter out any records that contain kernel addresses. */
		if (event->attr.exclude_kernel &&
		    (kernel_ip(at->from) || kernel_ip(at->to)))
			continue;

		data.ip   = at->from;
		data.addr = at->to;

		perf_output_sample(&handle, &header, &data, event);
	}

	perf_output_end(&handle);

	/* There's new data available. */
	event->hw.interrupts++;
	event->pending_kill = POLL_IN;
unlock:
	rcu_read_unlock();
	return 1;
}

static inline void intel_pmu_drain_pebs_buffer(void)
{
	struct perf_sample_data data;

	x86_pmu.drain_pebs(NULL, &data);
}

/*
 * PEBS
 */
struct event_constraint intel_core2_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_atom_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_slm_pebs_event_constraints[] = {
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_glm_pebs_event_constraints[] = {
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_nehalem_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),           /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),     /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf),  /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),     /* INST_RETIRED.ANY */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),           /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),     /* BR_INST_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf),  /* BR_MISP_RETIRED.NEAR_CALL */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),     /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf),  /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),     /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),     /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_westmere_pebs_event_constraints[] = {
	INTEL_PLD_CONSTRAINT(0x100b, 0xf),           /* MEM_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf),     /* MEM_UNCORE_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf),  /* MEM_STORE_RETIRED.DTLB_MISS */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf),     /* INSTR_RETIRED.* */
	INTEL_EVENT_CONSTRAINT(0xc2, 0xf),           /* UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf),     /* BR_INST_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf),     /* BR_MISP_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf),     /* SSEX_UOPS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf),  /* ITLB_MISS_RETIRED */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),     /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),     /* FP_ASSIST.* */
	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_snb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_ivb_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),          /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
	INTEL_PST_CONSTRAINT(0x02cd, 0x8),          /* MEM_TRANS_RETIRED.PRECISE_STORES */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),        /* MEM_UOP_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),        /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),        /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),        /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_hsw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf),  /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};


struct event_constraint intel_skl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),  /* INST_RETIRED.PREC_DIST */
	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),           /* MEM_TRANS_RETIRED.* */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf),    /* MEM_LOAD_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf),    /* MEM_LOAD_L3_HIT_RETIRED.* */
	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf),    /* MEM_LOAD_L3_MISS_RETIRED.* */
	/* Allow all events as PEBS with no flags */
	INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
	EVENT_CONSTRAINT_END
};

struct event_constraint intel_icl_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL),  /* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */

	INTEL_PLD_CONSTRAINT(0x1cd, 0xff),          /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),    /* MEM_INST_RETIRED.* */

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};

struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
	struct event_constraint *c;

	if (!event->attr.precise_ip)
		return NULL;

	if (x86_pmu.pebs_constraints) {
		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
			if (constraint_match(c, event->hw.config)) {
				event->hw.flags |= c->flags;
				return c;
			}
		}
	}

	/*
	 * Extended PEBS support
	 * Makes the PEBS code search the normal constraints.
	 */
	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		return NULL;

	return &emptyconstraint;
}

/*
 * We need the sched_task callback even for per-cpu events when we use
 * the large interrupt threshold, such that we can provide PID and TID
 * to PEBS samples.
 */
static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
{
	if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
		return false;

	return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
}

void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!sched_in && pebs_needs_sched_cb(cpuc))
		intel_pmu_drain_pebs_buffer();
}

static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
	struct debug_store *ds = cpuc->ds;
	u64 threshold;
	int reserved;

	if (cpuc->n_pebs_via_pt)
		return;

	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
		reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
	else
		reserved = x86_pmu.max_pebs_events;

	if (cpuc->n_pebs == cpuc->n_large_pebs) {
		threshold = ds->pebs_absolute_maximum -
			reserved * cpuc->pebs_record_size;
	} else {
		threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
	}

	ds->pebs_interrupt_threshold = threshold;
}
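
/*
 * In other words: when every PEBS event uses the large threshold, the
 * PMI only fires once the buffer is nearly full (leaving one record of
 * headroom per reserved counter); otherwise the threshold sits one
 * record above the base, so the PMI fires after every record.
 */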

static void adaptive_pebs_record_size_update(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 pebs_data_cfg = cpuc->pebs_data_cfg;
	int sz = sizeof(struct pebs_basic);

	if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
		sz += sizeof(struct pebs_meminfo);
	if (pebs_data_cfg & PEBS_DATACFG_GP)
		sz += sizeof(struct pebs_gprs);
	if (pebs_data_cfg & PEBS_DATACFG_XMMS)
		sz += sizeof(struct pebs_xmm);
	if (pebs_data_cfg & PEBS_DATACFG_LBRS)
		sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);

	cpuc->pebs_record_size = sz;
}
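
/*
 * A rough example, assuming the layouts in asm/perf_event.h (basic 32
 * bytes, meminfo 32, gprs 144, xmm 256, 24 per LBR entry): a config of
 * MEMINFO | GP with no LBRs requested yields a 208-byte record.
 */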

#define PERF_PEBS_MEMINFO_TYPE	(PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC |   \
				 PERF_SAMPLE_PHYS_ADDR | PERF_SAMPLE_WEIGHT | \
				 PERF_SAMPLE_TRANSACTION)

static u64 pebs_update_adaptive_cfg(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u64 sample_type = attr->sample_type;
	u64 pebs_data_cfg = 0;
	bool gprs, tsx_weight;

	if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
	    attr->precise_ip > 1)
		return pebs_data_cfg;

	if (sample_type & PERF_PEBS_MEMINFO_TYPE)
		pebs_data_cfg |= PEBS_DATACFG_MEMINFO;

	/*
	 * We need GPRs when:
	 * + user requested them
	 * + precise_ip < 2 (the sample IP then comes from the GPRs
	 *   rather than the eventing IP)
	 * + for RTM TSX weight we need GPRs for the abort code
	 */
	gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
	       (attr->sample_regs_intr & PEBS_GP_REGS);

	tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT) &&
		     ((attr->config & INTEL_ARCH_EVENT_MASK) ==
		      x86_pmu.rtm_abort_event);

	if (gprs || (attr->precise_ip < 2) || tsx_weight)
		pebs_data_cfg |= PEBS_DATACFG_GP;

	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
		pebs_data_cfg |= PEBS_DATACFG_XMMS;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		/*
		 * For now always log all LBRs. Could configure this
		 * later.
		 */
		pebs_data_cfg |= PEBS_DATACFG_LBRS |
			((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
	}

	return pebs_data_cfg;
}
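
/*
 * For example: an event whose sample_type requests nothing beyond
 * PERF_SAMPLE_IP and PERF_SAMPLE_TIME with precise_ip > 1 takes the
 * early return above, so only the basic group is recorded; requesting
 * PERF_SAMPLE_DATA_SRC additionally sets PEBS_DATACFG_MEMINFO.
 */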

static void
pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
		  struct perf_event *event, bool add)
{
	struct pmu *pmu = event->ctx->pmu;
	/*
	 * Make sure we get updated with the first PEBS event.
	 * It also triggers during removal, but that does not hurt:
	 */
	bool update = cpuc->n_pebs == 1;

	if (needed_cb != pebs_needs_sched_cb(cpuc)) {
		if (!needed_cb)
			perf_sched_cb_inc(pmu);
		else
			perf_sched_cb_dec(pmu);

		update = true;
	}

	/*
	 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
	 * iterating all remaining PEBS events to reconstruct the config.
	 */
	if (x86_pmu.intel_cap.pebs_baseline && add) {
		u64 pebs_data_cfg;

		/* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
		if (cpuc->n_pebs == 1) {
			cpuc->pebs_data_cfg = 0;
			cpuc->pebs_record_size = sizeof(struct pebs_basic);
		}

		pebs_data_cfg = pebs_update_adaptive_cfg(event);

		/* Update pebs_record_size if new event requires more data. */
		if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
			cpuc->pebs_data_cfg |= pebs_data_cfg;
			adaptive_pebs_record_size_update();
			update = true;
		}
	}

	if (update)
		pebs_update_threshold(cpuc);
}

void intel_pmu_pebs_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs++;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs++;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt++;

	pebs_update_state(needed_cb, cpuc, event, true);
}

static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!is_pebs_pt(event))
		return;

	if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
		cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
}

static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	if (!is_pebs_pt(event))
		return;

	if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
		cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;

	cpuc->pebs_enabled |= PEBS_OUTPUT_PT;

	wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
}
1107*4882a593Smuzhiyun
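/*
 * Bit layout of MSR_IA32_PEBS_ENABLE as used below: bit idx arms PEBS on
 * general-purpose counter idx, bit (32 + idx) additionally arms the load
 * latency facility on pre-v5 PMUs, and bit 63 arms precise stores.  As an
 * illustration, a load-latency event on counter 1 of a v4 PMU ends up
 * setting (1ULL << 1) | (1ULL << 33) in cpuc->pebs_enabled.
 */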
void intel_pmu_pebs_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct debug_store *ds = cpuc->ds;

	hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;

	cpuc->pebs_enabled |= 1ULL << hwc->idx;

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
		cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled |= 1ULL << 63;

	if (x86_pmu.intel_cap.pebs_baseline) {
		hwc->config |= ICL_EVENTSEL_ADAPTIVE;
		if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
			wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
			cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
		}
	}

	/*
	 * Use auto-reload if possible to save an MSR write in the PMI.
	 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD
	 * can change the sample period while the event is stopped.
	 */
	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		unsigned int idx = hwc->idx;

		if (idx >= INTEL_PMC_IDX_FIXED)
			idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
		ds->pebs_event_reset[idx] =
			(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
	} else {
		ds->pebs_event_reset[hwc->idx] = 0;
	}

	intel_pmu_pebs_via_pt_enable(event);
}

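/*
 * pmu::del() counterpart of intel_pmu_pebs_add(): drop the event from the
 * per-CPU PEBS accounting and recompute the scheduling-callback and
 * threshold state.  Note that the adaptive record size is deliberately
 * never shrunk here; see the comment in pebs_update_state().
 */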
void intel_pmu_pebs_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	bool needed_cb = pebs_needs_sched_cb(cpuc);

	cpuc->n_pebs--;
	if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
		cpuc->n_large_pebs--;
	if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
		cpuc->n_pebs_via_pt--;

	pebs_update_state(needed_cb, cpuc, event, false);
}

void intel_pmu_pebs_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (cpuc->n_pebs == cpuc->n_large_pebs &&
	    cpuc->n_pebs != cpuc->n_pebs_via_pt)
		intel_pmu_drain_pebs_buffer();

	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);

	if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
	    (x86_pmu.version < 5))
		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
	else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
		cpuc->pebs_enabled &= ~(1ULL << 63);

	intel_pmu_pebs_via_pt_disable(event);

	if (cpuc->enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);

	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
}

void intel_pmu_pebs_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
}

void intel_pmu_pebs_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->pebs_enabled)
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
}

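/*
 * On cores where the PEBS assist is trap-like, the reported IP points to
 * the instruction *after* the sampled one.  The fixup below rewinds it:
 * starting from the last LBR branch target it decodes forward, one
 * instruction at a time, until it reaches the reported IP; the start of
 * the previously decoded instruction is then the real sample IP.
 */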
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long from = cpuc->lbr_entries[0].from;
	unsigned long old_to, to = cpuc->lbr_entries[0].to;
	unsigned long ip = regs->ip;
	int is_64bit = 0;
	void *kaddr;
	int size;

	/*
	 * We don't need to fix up if the PEBS assist is fault-like.
	 */
	if (!x86_pmu.intel_cap.pebs_trap)
		return 1;

	/*
	 * No LBR entry, no basic block, no rewinding.
	 */
	if (!cpuc->lbr_stack.nr || !from || !to)
		return 0;

	/*
	 * Basic blocks should never cross user/kernel boundaries.
	 */
	if (kernel_ip(ip) != kernel_ip(to))
		return 0;

	/*
	 * Unsigned math: either ip is before the start (impossible) or
	 * the basic block is larger than one page (sanity).
	 */
	if ((ip - to) > PEBS_FIXUP_SIZE)
		return 0;

	/*
	 * We sampled a branch insn, rewind using the LBR stack.
	 */
	if (ip == to) {
		set_linear_ip(regs, from);
		return 1;
	}

	size = ip - to;
	if (!kernel_ip(ip)) {
		int bytes;
		u8 *buf = this_cpu_read(insn_buffer);

		/* 'size' must fit our buffer, see above */
		bytes = copy_from_user_nmi(buf, (void __user *)to, size);
		if (bytes != 0)
			return 0;

		kaddr = buf;
	} else {
		kaddr = (void *)to;
	}

	do {
		struct insn insn;

		old_to = to;

#ifdef CONFIG_X86_64
		is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
#endif
		insn_init(&insn, kaddr, size, is_64bit);
		insn_get_length(&insn);
		/*
		 * Make sure there was not a problem decoding the instruction
		 * and getting the length.  This is doubly important because
		 * we would have an infinite loop if insn.length == 0.
		 */
		if (!insn.length)
			break;

		to += insn.length;
		kaddr += insn.length;
		size -= insn.length;
	} while (to < ip);

	if (to == ip) {
		set_linear_ip(regs, old_to);
		return 1;
	}

	/*
	 * Even though we decoded the basic block, the instruction stream
	 * never matched the given IP; either the TO or the IP got corrupted.
	 */
	return 0;
}

static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
{
	if (tsx_tuning) {
		union hsw_tsx_tuning tsx = { .value = tsx_tuning };
		return tsx.cycles_last_block;
	}
	return 0;
}

static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
{
	u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;

	/* For RTM XABORTs also log the abort code from AX */
	if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
		txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
	return txn;
}

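/*
 * The per-record "which counters does this record apply to" bitmask moved
 * between record formats: up to fmt3 it lives in pebs_record_nhm::status,
 * from fmt4 (adaptive PEBS) on it is pebs_basic::applicable_counters.
 */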
static inline u64 get_pebs_status(void *n)
{
	if (x86_pmu.intel_cap.pebs_format < 4)
		return ((struct pebs_record_nhm *)n)->status;
	return ((struct pebs_basic *)n)->applicable_counters;
}

#define PERF_X86_EVENT_PEBS_HSW_PREC \
		(PERF_X86_EVENT_PEBS_ST_HSW | \
		 PERF_X86_EVENT_PEBS_LD_HSW | \
		 PERF_X86_EVENT_PEBS_NA_HSW)

static u64 get_data_src(struct perf_event *event, u64 aux)
{
	u64 val = PERF_MEM_NA;
	int fl = event->hw.flags;
	bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);

	if (fl & PERF_X86_EVENT_PEBS_LDLAT)
		val = load_latency_data(aux);
	else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
		val = precise_datala_hsw(event, aux);
	else if (fst)
		val = precise_store_data(aux);
	return val;
}

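/*
 * Translate one fixed-layout PEBS record (fmt0..fmt3) into a
 * perf_sample_data/pt_regs pair ready for output.  The interrupt regs
 * always serve as the template; only the fields the record can supply
 * (IP, GPRs, data address, weight, TSX info, timestamp) are overridden,
 * and only when the event's sample_type asked for them.
 */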
static void setup_pebs_fixed_sample_data(struct perf_event *event,
					 struct pt_regs *iregs, void *__pebs,
					 struct perf_sample_data *data,
					 struct pt_regs *regs)
{
	/*
	 * We cast to the biggest pebs_record but are careful not to
	 * unconditionally access the 'extra' entries.
	 */
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_record_skl *pebs = __pebs;
	u64 sample_type;
	int fll;

	if (pebs == NULL)
		return;

	sample_type = event->attr.sample_type;
	fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;

	perf_sample_data_init(data, 0, event->hw.last_period);

	data->period = event->hw.last_period;

	/*
	 * Use latency for weight (only avail with PEBS-LL)
	 */
	if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
		data->weight = pebs->lat;

	/*
	 * data.data_src encodes the data source
	 */
	if (sample_type & PERF_SAMPLE_DATA_SRC)
		data->data_src.val = get_data_src(event, pebs->dse);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	/*
	 * We use the interrupt regs as a base because the PEBS record does not
	 * contain a full regs set, specifically it seems to lack segment
	 * descriptors, which get used by things like user_mode().
	 *
	 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 */
	*regs = *iregs;

	/*
	 * Initialize regs->flags from PEBS, and clear the exact bit (which
	 * uses x86 EFLAGS reserved bit 3), i.e. do not rely on it being zero:
	 */
	regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;

	if (sample_type & PERF_SAMPLE_REGS_INTR) {
		regs->ax = pebs->ax;
		regs->bx = pebs->bx;
		regs->cx = pebs->cx;
		regs->dx = pebs->dx;
		regs->si = pebs->si;
		regs->di = pebs->di;

		regs->bp = pebs->bp;
		regs->sp = pebs->sp;

#ifndef CONFIG_X86_32
		regs->r8 = pebs->r8;
		regs->r9 = pebs->r9;
		regs->r10 = pebs->r10;
		regs->r11 = pebs->r11;
		regs->r12 = pebs->r12;
		regs->r13 = pebs->r13;
		regs->r14 = pebs->r14;
		regs->r15 = pebs->r15;
#endif
	}

	if (event->attr.precise_ip > 1) {
		/*
		 * Haswell and later processors have an 'eventing IP'
		 * (real IP) which fixes the off-by-1 skid in hardware.
		 * Use it when precise_ip >= 2:
		 */
		if (x86_pmu.intel_cap.pebs_format >= 2) {
			set_linear_ip(regs, pebs->real_ip);
			regs->flags |= PERF_EFLAGS_EXACT;
		} else {
			/* Otherwise, use PEBS off-by-1 IP: */
			set_linear_ip(regs, pebs->ip);

			/*
			 * With precise_ip >= 2, try to fix up the off-by-1 IP
			 * using the LBR. If successful, the fixup function
			 * corrects regs->ip and calls set_linear_ip() on regs:
			 */
			if (intel_pmu_pebs_fixup_ip(regs))
				regs->flags |= PERF_EFLAGS_EXACT;
		}
	} else {
		/*
		 * When precise_ip == 1, return the PEBS off-by-1 IP,
		 * no fixup attempted:
		 */
		set_linear_ip(regs, pebs->ip);
	}

	if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) &&
	    x86_pmu.intel_cap.pebs_format >= 1)
		data->addr = pebs->dla;

	if (x86_pmu.intel_cap.pebs_format >= 2) {
		/* Only set the TSX weight when no memory weight. */
		if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
			data->weight = intel_get_tsx_weight(pebs->tsx_tuning);

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
							      pebs->ax);
	}

	/*
	 * v3 supplies an accurate time stamp, so we use that
	 * for the time stamp.
	 *
	 * We can only do this for the default trace clock.
	 */
	if (x86_pmu.intel_cap.pebs_format >= 3 &&
	    event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(pebs->tsc);

	if (has_branch_stack(event))
		data->br_stack = &cpuc->lbr_stack;
}

static void adaptive_pebs_save_regs(struct pt_regs *regs,
				    struct pebs_gprs *gprs)
{
	regs->ax = gprs->ax;
	regs->bx = gprs->bx;
	regs->cx = gprs->cx;
	regs->dx = gprs->dx;
	regs->si = gprs->si;
	regs->di = gprs->di;
	regs->bp = gprs->bp;
	regs->sp = gprs->sp;
#ifndef CONFIG_X86_32
	regs->r8 = gprs->r8;
	regs->r9 = gprs->r9;
	regs->r10 = gprs->r10;
	regs->r11 = gprs->r11;
	regs->r12 = gprs->r12;
	regs->r13 = gprs->r13;
	regs->r14 = gprs->r14;
	regs->r15 = gprs->r15;
#endif
}

/*
 * With adaptive PEBS the layout depends on what fields are configured.
 */

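/*
 * Parsing below mirrors the hardware layout: a struct pebs_basic header is
 * always first, followed (in this order, each only if the corresponding
 * PEBS_DATACFG_* bit was configured) by struct pebs_meminfo, struct
 * pebs_gprs, struct pebs_xmm and an array of struct lbr_entry.  The upper
 * 16 bits of basic->format_size carry the total record size, which the
 * WARN_ONCE at the end checks against what was actually consumed.
 */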
static void setup_pebs_adaptive_sample_data(struct perf_event *event,
					    struct pt_regs *iregs, void *__pebs,
					    struct perf_sample_data *data,
					    struct pt_regs *regs)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct pebs_basic *basic = __pebs;
	void *next_record = basic + 1;
	u64 sample_type;
	u64 format_size;
	struct pebs_meminfo *meminfo = NULL;
	struct pebs_gprs *gprs = NULL;
	struct x86_perf_regs *perf_regs;

	if (basic == NULL)
		return;

	perf_regs = container_of(regs, struct x86_perf_regs, regs);
	perf_regs->xmm_regs = NULL;

	sample_type = event->attr.sample_type;
	format_size = basic->format_size;
	perf_sample_data_init(data, 0, event->hw.last_period);
	data->period = event->hw.last_period;

	if (event->attr.use_clockid == 0)
		data->time = native_sched_clock_from_tsc(basic->tsc);

	/*
	 * We must however always use iregs for the unwinder to stay sane; the
	 * record BP,SP,IP can point into thin air when the record is from a
	 * previous PMI context or an (I)RET happened between the record and
	 * PMI.
	 */
	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		data->callchain = perf_callchain(event, iregs);

	*regs = *iregs;
	/* The ip in basic is EventingIP */
	set_linear_ip(regs, basic->ip);
	regs->flags = PERF_EFLAGS_EXACT;

	/*
	 * The MEMINFO block precedes the GP block, but
	 * PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here and process it later.
	 */
	if (format_size & PEBS_DATACFG_MEMINFO) {
		meminfo = next_record;
		next_record = meminfo + 1;
	}

	if (format_size & PEBS_DATACFG_GP) {
		gprs = next_record;
		next_record = gprs + 1;

		if (event->attr.precise_ip < 2) {
			set_linear_ip(regs, gprs->ip);
			regs->flags &= ~PERF_EFLAGS_EXACT;
		}

		if (sample_type & PERF_SAMPLE_REGS_INTR)
			adaptive_pebs_save_regs(regs, gprs);
	}

	if (format_size & PEBS_DATACFG_MEMINFO) {
		if (sample_type & PERF_SAMPLE_WEIGHT)
			data->weight = meminfo->latency ?:
				intel_get_tsx_weight(meminfo->tsx_tuning);

		if (sample_type & PERF_SAMPLE_DATA_SRC)
			data->data_src.val = get_data_src(event, meminfo->aux);

		if (sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
			data->addr = meminfo->address;

		if (sample_type & PERF_SAMPLE_TRANSACTION)
			data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
							      gprs ? gprs->ax : 0);
	}

	if (format_size & PEBS_DATACFG_XMMS) {
		struct pebs_xmm *xmm = next_record;

		next_record = xmm + 1;
		perf_regs->xmm_regs = xmm->xmm;
	}

	if (format_size & PEBS_DATACFG_LBRS) {
		struct lbr_entry *lbr = next_record;
		int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
					& 0xff) + 1;
		next_record = next_record + num_lbr * sizeof(struct lbr_entry);

		if (has_branch_stack(event)) {
			intel_pmu_store_pebs_lbrs(lbr);
			data->br_stack = &cpuc->lbr_stack;
		}
	}

	WARN_ONCE(next_record != __pebs + (format_size >> 48),
		  "PEBS record size %llu, expected %llu, config %llx\n",
		  format_size >> 48,
		  (u64)(next_record - __pebs),
		  basic->format_size);
}

static inline void *
get_next_pebs_record_by_bit(void *base, void *top, int bit)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	void *at;
	u64 pebs_status;

	/*
	 * fmt0 does not have a status bitfield (does not use the
	 * pebs_record_nhm format).
	 */
	if (x86_pmu.intel_cap.pebs_format < 1)
		return base;

	if (base == NULL)
		return NULL;

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		unsigned long status = get_pebs_status(at);

		if (test_bit(bit, (unsigned long *)&status)) {
			/* PEBS v3 has accurate status bits */
			if (x86_pmu.intel_cap.pebs_format >= 3)
				return at;

			if (status == (1 << bit))
				return at;

			/* clear non-PEBS bit and re-check */
			pebs_status = status & cpuc->pebs_enabled;
			pebs_status &= PEBS_COUNTER_MASK;
			if (pebs_status == (1 << bit))
				return at;
		}
	}
	return NULL;
}

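/*
 * pmu::read() helper for auto-reload events: the up-to-date count can only
 * be recovered after the accumulated PEBS records have been folded back
 * into the event, so drain the buffer with the PMU disabled.
 */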
void intel_pmu_auto_reload_read(struct perf_event *event)
{
	WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));

	perf_pmu_disable(event->pmu);
	intel_pmu_drain_pebs_buffer();
	perf_pmu_enable(event->pmu);
}

/*
 * Special variant of intel_pmu_save_and_restart() for auto-reload.
 */
static int
intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 period = hwc->sample_period;
	u64 prev_raw_count, new_raw_count;
	s64 new, old;

	WARN_ON(!period);

	/*
	 * drain_pebs() only happens when the PMU is disabled.
	 */
	WARN_ON(this_cpu_read(cpu_hw_events.enabled));

	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);
	local64_set(&hwc->prev_count, new_raw_count);

	/*
	 * Since the counter increments a negative counter value and
	 * overflows on the sign switch, giving the interval:
	 *
	 *   [-period, 0]
	 *
	 * the difference between two consecutive reads is:
	 *
	 *   A) value2 - value1;
	 *      when no overflows have happened in between,
	 *
	 *   B) (0 - value1) + (value2 - (-period));
	 *      when one overflow happened in between,
	 *
	 *   C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
	 *      when @n overflows happened in between.
	 *
	 * Here A) is the obvious difference, B) is the extension to the
	 * discrete interval, where the first term is to the top of the
	 * interval and the second term is from the bottom of the next
	 * interval and C) the extension to multiple intervals, where the
	 * middle term is the whole intervals covered.
	 *
	 * An equivalent of C, by reduction, is:
	 *
	 *   value2 - value1 + n * period
	 */
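	/*
	 * Worked example (illustrative numbers only): with period = 100, a
	 * previous raw value of -30 and a new raw value of -90 after one
	 * overflow, the delta is (-90) - (-30) + 1 * 100 = 40 events: 30 to
	 * finish the old interval plus 10 into the new one.
	 */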
	new = ((s64)(new_raw_count << shift) >> shift);
	old = ((s64)(prev_raw_count << shift) >> shift);
	local64_add(new - old + count * period, &event->count);

	local64_set(&hwc->period_left, -new);

	perf_event_update_userpage(event);

	return 0;
}

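/*
 * Common drain helper: walk the records that belong to @bit, emit all but
 * the last one directly, and route the last record through the overflow
 * handler (unless draining outside of PMI context, in which case it is
 * output like the others).  @setup_sample picks the fixed or adaptive
 * record parser.
 */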
static __always_inline void
__intel_pmu_pebs_event(struct perf_event *event,
		       struct pt_regs *iregs,
		       struct perf_sample_data *data,
		       void *base, void *top,
		       int bit, int count,
		       void (*setup_sample)(struct perf_event *,
					    struct pt_regs *,
					    void *,
					    struct perf_sample_data *,
					    struct pt_regs *))
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	struct x86_perf_regs perf_regs;
	struct pt_regs *regs = &perf_regs.regs;
	void *at = get_next_pebs_record_by_bit(base, top, bit);
	static struct pt_regs dummy_iregs;

	if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
		/*
		 * Now, auto-reload is only enabled in fixed period mode.
		 * The reload value is always hwc->sample_period.
		 * May need to change it, if auto-reload is enabled in
		 * freq mode later.
		 */
		intel_pmu_save_and_restart_reload(event, count);
	} else if (!intel_pmu_save_and_restart(event))
		return;

	if (!iregs)
		iregs = &dummy_iregs;

	while (count > 1) {
		setup_sample(event, iregs, at, data, regs);
		perf_event_output(event, data, regs);
		at += cpuc->pebs_record_size;
		at = get_next_pebs_record_by_bit(at, top, bit);
		count--;
	}

	setup_sample(event, iregs, at, data, regs);
	if (iregs == &dummy_iregs) {
		/*
		 * The PEBS records may be drained in the non-overflow context,
		 * e.g., large PEBS + context switch. Perf should treat the
		 * last record the same as other PEBS records, and doesn't
		 * invoke the generic overflow handler.
		 */
		perf_event_output(event, data, regs);
	} else {
		/*
		 * All but the last records are processed.
		 * The last one is left to be able to call the overflow handler.
		 */
		if (perf_event_overflow(event, data, regs))
			x86_pmu_stop(event, 0);
	}
}

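/*
 * fmt0 drain: Core2-era PEBS supports a single event on PMC0 only and its
 * records carry no status bits, so every record in the buffer is
 * attributed to that one event.
 */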
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
	struct pebs_record_core *at, *top;
	int n;

	if (!x86_pmu.pebs_active)
		return;

	at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;

	/*
	 * Whatever else happens, drain the thing
	 */
	ds->pebs_index = ds->pebs_buffer_base;

	if (!test_bit(0, cpuc->active_mask))
		return;

	WARN_ON_ONCE(!event);

	if (!event->attr.precise_ip)
		return;

	n = top - at;
	if (n <= 0) {
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
		return;
	}

	__intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
			       setup_pebs_fixed_sample_data);
}

static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
{
	struct perf_event *event;
	int bit;

	/*
	 * drain_pebs() can be called twice in a short period for an
	 * auto-reload event in pmu::read(), with no overflows having
	 * happened in between.  intel_pmu_save_and_restart_reload() still
	 * needs to be called to update event->count in this case.
	 */
	for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
		event = cpuc->events[bit];
		if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
			intel_pmu_save_and_restart_reload(event, 0);
	}
}

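/*
 * fmt1..fmt3 drain: a first pass over the buffer counts per-counter
 * records in counts[] and collision-dropped records in error[]; a second
 * pass then emits the samples per event and logs the dropped ones.
 */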
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	int bit, i, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = (1ULL << x86_pmu.max_pebs_events) - 1;
	size = x86_pmu.max_pebs_events;
	if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
		mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
		size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
	}

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += x86_pmu.pebs_record_size) {
		struct pebs_record_nhm *p = at;
		u64 pebs_status;

		pebs_status = p->status & cpuc->pebs_enabled;
		pebs_status &= mask;

		/* PEBS v3 has more accurate status bits */
		if (x86_pmu.intel_cap.pebs_format >= 3) {
			for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
				counts[bit]++;

			continue;
		}

		/*
		 * On some CPUs the PEBS status can be zero when PEBS is
		 * racing with clearing of GLOBAL_STATUS.
		 *
		 * Normally we would drop that record, but in the
		 * case when there is only a single active PEBS event
		 * we can assume it's for that event.
		 */
		if (!pebs_status && cpuc->pebs_enabled &&
		    !(cpuc->pebs_enabled & (cpuc->pebs_enabled - 1)))
			pebs_status = p->status = cpuc->pebs_enabled;

		bit = find_first_bit((unsigned long *)&pebs_status,
				     x86_pmu.max_pebs_events);
		if (bit >= x86_pmu.max_pebs_events)
			continue;

		/*
		 * The PEBS hardware does not deal well with the situation
		 * when events happen near to each other and multiple bits
		 * are set. But it should happen rarely.
		 *
		 * If these events include one PEBS and multiple non-PEBS
		 * events, it doesn't impact the PEBS record. The record will
		 * be handled normally. (slow path)
		 *
		 * If these events include two or more PEBS events, the
		 * records for the events can be collapsed into a single
		 * one, and it's not possible to reconstruct all events
		 * that caused the PEBS record. This is called a collision.
		 * If a collision happened, the record is dropped.
		 */
		if (pebs_status != (1ULL << bit)) {
			for_each_set_bit(i, (unsigned long *)&pebs_status, size)
				error[i]++;
			continue;
		}

		counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if ((counts[bit] == 0) && (error[bit] == 0))
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		/* log dropped samples number */
		if (error[bit]) {
			perf_log_lost_samples(event, error[bit]);

			if (iregs && perf_event_account_interrupt(event))
				x86_pmu_stop(event, 0);
		}

		if (counts[bit]) {
			__intel_pmu_pebs_event(event, iregs, data, base,
					       top, bit, counts[bit],
					       setup_pebs_fixed_sample_data);
		}
	}
}

static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
{
	short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct debug_store *ds = cpuc->ds;
	struct perf_event *event;
	void *base, *at, *top;
	int bit, size;
	u64 mask;

	if (!x86_pmu.pebs_active)
		return;

	base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
	top = (struct pebs_basic *)(unsigned long)ds->pebs_index;

	ds->pebs_index = ds->pebs_buffer_base;

	mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
	       (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
	size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;

	if (unlikely(base >= top)) {
		intel_pmu_pebs_event_update_no_drain(cpuc, size);
		return;
	}

	for (at = base; at < top; at += cpuc->pebs_record_size) {
		u64 pebs_status;

		pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
		pebs_status &= mask;

		for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
			counts[bit]++;
	}

	for_each_set_bit(bit, (unsigned long *)&mask, size) {
		if (counts[bit] == 0)
			continue;

		event = cpuc->events[bit];
		if (WARN_ON_ONCE(!event))
			continue;

		if (WARN_ON_ONCE(!event->attr.precise_ip))
			continue;

		__intel_pmu_pebs_event(event, iregs, data, base,
				       top, bit, counts[bit],
				       setup_pebs_adaptive_sample_data);
	}
}

/*
 * BTS, PEBS probe and setup
 */

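/*
 * PEBS record format summary as probed below: fmt0 (Core2) uses its own
 * single-event drain; fmt1 (Nehalem), fmt2 (Haswell, adds the eventing IP
 * and TSX info) and fmt3 (Skylake, adds a usable TSC) share
 * intel_pmu_drain_pebs_nhm(); fmt4 (Icelake) switches to adaptive records
 * and intel_pmu_drain_pebs_icl().
 */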
void __init intel_ds_init(void)
{
	/*
	 * No support for 32bit formats
	 */
	if (!boot_cpu_has(X86_FEATURE_DTES64))
		return;

	x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.version <= 4)
		x86_pmu.pebs_no_isolation = 1;

	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
		char *pebs_qual = "";
		int format = x86_pmu.intel_cap.pebs_format;

		if (format < 4)
			x86_pmu.intel_cap.pebs_baseline = 0;

		switch (format) {
		case 0:
			pr_cont("PEBS fmt0%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
			/*
			 * Using >PAGE_SIZE buffers makes the WRMSR to
			 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
			 * mysteriously hang on Core2.
			 *
			 * As a workaround, we don't do this.
			 */
			x86_pmu.pebs_buffer_size = PAGE_SIZE;
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
			break;

		case 1:
			pr_cont("PEBS fmt1%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 2:
			pr_cont("PEBS fmt2%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			break;

		case 3:
			pr_cont("PEBS fmt3%c, ", pebs_type);
			x86_pmu.pebs_record_size = sizeof(struct pebs_record_skl);
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
			x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
			break;

		case 4:
			x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
			x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
			if (x86_pmu.intel_cap.pebs_baseline) {
				x86_pmu.large_pebs_flags |=
					PERF_SAMPLE_BRANCH_STACK |
					PERF_SAMPLE_TIME;
				x86_pmu.flags |= PMU_FL_PEBS_ALL;
				pebs_qual = "-baseline";
				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
			} else {
				/* Only basic record supported */
				x86_pmu.large_pebs_flags &=
					~(PERF_SAMPLE_ADDR |
					  PERF_SAMPLE_TIME |
					  PERF_SAMPLE_DATA_SRC |
					  PERF_SAMPLE_TRANSACTION |
					  PERF_SAMPLE_REGS_USER |
					  PERF_SAMPLE_REGS_INTR);
			}
			pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);

			if (x86_pmu.intel_cap.pebs_output_pt_available) {
				pr_cont("PEBS-via-PT, ");
				x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
			}

			break;

		default:
			pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
			x86_pmu.pebs = 0;
		}
	}
}

void perf_restore_debug_store(void)
{
	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);

	if (!x86_pmu.bts && !x86_pmu.pebs)
		return;

	wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);
}