// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel(R) Processor Trace PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 *
 * Intel PT is specified in the Intel Architecture Instruction Set Extensions
 * Programming Reference:
 * http://software.intel.com/en-us/intel-isa-extensions
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/limits.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <asm/perf_event.h>
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/intel_pt.h>
#include <asm/intel-family.h>

#include "../perf_event.h"
#include "pt.h"

static DEFINE_PER_CPU(struct pt, pt_ctx);

static struct pt_pmu pt_pmu;

/*
 * Capabilities of Intel PT hardware, such as the number of address bits or
 * supported output schemes, are cached and exported to userspace as the
 * "caps" attribute group of the pt PMU device
 * (/sys/bus/event_source/devices/intel_pt/caps/) so that userspace can store
 * relevant bits together with intel_pt traces.
 *
 * These are necessary both for trace decoding (payloads_lip contains the
 * address width encoded in IP-related packets) and for event configuration
 * (bitmasks with permitted values for certain bit fields).
 */
#define PT_CAP(_n, _l, _r, _m)						\
	[PT_CAP_ ## _n] = { .name = __stringify(_n), .leaf = _l,	\
			    .reg = _r, .mask = _m }

static struct pt_cap_desc {
	const char	*name;
	u32		leaf;
	u8		reg;
	u32		mask;
} pt_caps[] = {
	PT_CAP(max_subleaf,		0, CPUID_EAX, 0xffffffff),
	PT_CAP(cr3_filtering,		0, CPUID_EBX, BIT(0)),
	PT_CAP(psb_cyc,			0, CPUID_EBX, BIT(1)),
	PT_CAP(ip_filtering,		0, CPUID_EBX, BIT(2)),
	PT_CAP(mtc,			0, CPUID_EBX, BIT(3)),
	PT_CAP(ptwrite,			0, CPUID_EBX, BIT(4)),
	PT_CAP(power_event_trace,	0, CPUID_EBX, BIT(5)),
	PT_CAP(topa_output,		0, CPUID_ECX, BIT(0)),
	PT_CAP(topa_multiple_entries,	0, CPUID_ECX, BIT(1)),
	PT_CAP(single_range_output,	0, CPUID_ECX, BIT(2)),
	PT_CAP(output_subsys,		0, CPUID_ECX, BIT(3)),
	PT_CAP(payloads_lip,		0, CPUID_ECX, BIT(31)),
	PT_CAP(num_address_ranges,	1, CPUID_EAX, 0x7),
	PT_CAP(mtc_periods,		1, CPUID_EAX, 0xffff0000),
	PT_CAP(cycle_thresholds,	1, CPUID_EBX, 0xffff),
	PT_CAP(psb_periods,		1, CPUID_EBX, 0xffff0000),
};

u32 intel_pt_validate_cap(u32 *caps, enum pt_capabilities capability)
{
	struct pt_cap_desc *cd = &pt_caps[capability];
	u32 c = caps[cd->leaf * PT_CPUID_REGS_NUM + cd->reg];
	unsigned int shift = __ffs(cd->mask);

	return (c & cd->mask) >> shift;
}
EXPORT_SYMBOL_GPL(intel_pt_validate_cap);
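
/*
 * Worked example (illustrative, not part of the driver): mtc_periods is
 * declared above as leaf 1, CPUID_EAX, mask 0xffff0000. Assuming CPUID
 * leaf 0x14, subleaf 1 returned EAX = 0x02490003, the cached dword is
 * caps[1 * PT_CPUID_REGS_NUM + CPUID_EAX]; __ffs(0xffff0000) = 16, so
 * intel_pt_validate_cap() yields (0x02490003 & 0xffff0000) >> 16 = 0x0249,
 * a bitmask of supported MTC periods. The EAX value here is made up.
 */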

u32 intel_pt_validate_hw_cap(enum pt_capabilities cap)
{
	return intel_pt_validate_cap(pt_pmu.caps, cap);
}
EXPORT_SYMBOL_GPL(intel_pt_validate_hw_cap);

static ssize_t pt_cap_show(struct device *cdev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	enum pt_capabilities cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%x\n", intel_pt_validate_hw_cap(cap));
}

static struct attribute_group pt_cap_group __ro_after_init = {
	.name	= "caps",
};

PMU_FORMAT_ATTR(pt,		"config:0"	);
PMU_FORMAT_ATTR(cyc,		"config:1"	);
PMU_FORMAT_ATTR(pwr_evt,	"config:4"	);
PMU_FORMAT_ATTR(fup_on_ptw,	"config:5"	);
PMU_FORMAT_ATTR(mtc,		"config:9"	);
PMU_FORMAT_ATTR(tsc,		"config:10"	);
PMU_FORMAT_ATTR(noretcomp,	"config:11"	);
PMU_FORMAT_ATTR(ptw,		"config:12"	);
PMU_FORMAT_ATTR(branch,		"config:13"	);
PMU_FORMAT_ATTR(mtc_period,	"config:14-17"	);
PMU_FORMAT_ATTR(cyc_thresh,	"config:19-22"	);
PMU_FORMAT_ATTR(psb_period,	"config:24-27"	);
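
/*
 * Usage sketch (illustrative): these format attributes let perf tooling
 * encode event terms into attr.config, e.g.
 *
 *	perf record -e intel_pt/cyc=1,cyc_thresh=2,psb_period=3/u -- workload
 *
 * which sets config bit 1 (cyc), bits 19-22 to 2 and bits 24-27 to 3.
 * The command line is an example only; whether a given value is accepted
 * depends on the caps checked in pt_event_valid() below.
 */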

static struct attribute *pt_formats_attr[] = {
	&format_attr_pt.attr,
	&format_attr_cyc.attr,
	&format_attr_pwr_evt.attr,
	&format_attr_fup_on_ptw.attr,
	&format_attr_mtc.attr,
	&format_attr_tsc.attr,
	&format_attr_noretcomp.attr,
	&format_attr_ptw.attr,
	&format_attr_branch.attr,
	&format_attr_mtc_period.attr,
	&format_attr_cyc_thresh.attr,
	&format_attr_psb_period.attr,
	NULL,
};

static struct attribute_group pt_format_group = {
	.name	= "format",
	.attrs	= pt_formats_attr,
};

static ssize_t
pt_timing_attr_show(struct device *dev, struct device_attribute *attr,
		    char *page)
{
	struct perf_pmu_events_attr *pmu_attr =
		container_of(attr, struct perf_pmu_events_attr, attr);

	switch (pmu_attr->id) {
	case 0:
		return sprintf(page, "%lu\n", pt_pmu.max_nonturbo_ratio);
	case 1:
		return sprintf(page, "%u:%u\n",
			       pt_pmu.tsc_art_num,
			       pt_pmu.tsc_art_den);
	default:
		break;
	}

	return -EINVAL;
}

PMU_EVENT_ATTR(max_nonturbo_ratio, timing_attr_max_nonturbo_ratio, 0,
	       pt_timing_attr_show);
PMU_EVENT_ATTR(tsc_art_ratio, timing_attr_tsc_art_ratio, 1,
	       pt_timing_attr_show);

static struct attribute *pt_timing_attr[] = {
	&timing_attr_max_nonturbo_ratio.attr.attr,
	&timing_attr_tsc_art_ratio.attr.attr,
	NULL,
};

static struct attribute_group pt_timing_group = {
	.attrs	= pt_timing_attr,
};

static const struct attribute_group *pt_attr_groups[] = {
	&pt_cap_group,
	&pt_format_group,
	&pt_timing_group,
	NULL,
};

static int __init pt_pmu_hw_init(void)
{
	struct dev_ext_attribute *de_attrs;
	struct attribute **attrs;
	size_t size;
	u64 reg;
	int ret;
	long i;

	rdmsrl(MSR_PLATFORM_INFO, reg);
	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

	/*
	 * If available, read in the TSC to core crystal clock ratio;
	 * otherwise, a zero numerator stands for "not enumerated",
	 * as per the SDM.
	 */
	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
		u32 eax, ebx, ecx, edx;

		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);

		pt_pmu.tsc_art_num = ebx;
		pt_pmu.tsc_art_den = eax;
	}

	/* model-specific quirks */
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
		/* not setting BRANCH_EN will #GP, erratum BDM106 */
		pt_pmu.branch_en_always_on = true;
		break;
	default:
		break;
	}

	if (boot_cpu_has(X86_FEATURE_VMX)) {
		/*
		 * Intel SDM, 36.5 "Tracing post-VMXON" says that
		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
		 * post-VMXON.
		 */
		rdmsrl(MSR_IA32_VMX_MISC, reg);
		if (reg & BIT(14))
			pt_pmu.vmx = true;
	}

	for (i = 0; i < PT_CPUID_LEAVES; i++) {
		cpuid_count(20, i,
			    &pt_pmu.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM],
			    &pt_pmu.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM]);
	}

	ret = -ENOMEM;
	size = sizeof(struct attribute *) * (ARRAY_SIZE(pt_caps)+1);
	attrs = kzalloc(size, GFP_KERNEL);
	if (!attrs)
		goto fail;

	size = sizeof(struct dev_ext_attribute) * (ARRAY_SIZE(pt_caps)+1);
	de_attrs = kzalloc(size, GFP_KERNEL);
	if (!de_attrs)
		goto fail;

	for (i = 0; i < ARRAY_SIZE(pt_caps); i++) {
		struct dev_ext_attribute *de_attr = de_attrs + i;

		de_attr->attr.attr.name = pt_caps[i].name;

		sysfs_attr_init(&de_attr->attr.attr);

		de_attr->attr.attr.mode		= S_IRUGO;
		de_attr->attr.show		= pt_cap_show;
		de_attr->var			= (void *)i;

		attrs[i] = &de_attr->attr.attr;
	}

	pt_cap_group.attrs = attrs;

	return 0;

fail:
	kfree(attrs);

	return ret;
}

#define RTIT_CTL_CYC_PSB (RTIT_CTL_CYCLEACC	| \
			  RTIT_CTL_CYC_THRESH	| \
			  RTIT_CTL_PSB_FREQ)

#define RTIT_CTL_MTC	(RTIT_CTL_MTC_EN	| \
			 RTIT_CTL_MTC_RANGE)

#define RTIT_CTL_PTW	(RTIT_CTL_PTW_EN	| \
			 RTIT_CTL_FUP_ON_PTW)

/*
 * Bit 0 (TraceEn) in the attr.config is meaningless as the
 * corresponding bit in the RTIT_CTL can only be controlled
 * by the driver; therefore, repurpose it to mean: pass
 * through the bit that was previously assumed to be always
 * on for PT, thereby allowing the user to *not* set it if
 * they so wish. See also pt_event_valid() and pt_config().
 */
#define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN

#define PT_CONFIG_MASK (RTIT_CTL_TRACEEN	| \
			RTIT_CTL_TSC_EN		| \
			RTIT_CTL_DISRETC	| \
			RTIT_CTL_BRANCH_EN	| \
			RTIT_CTL_CYC_PSB	| \
			RTIT_CTL_MTC		| \
			RTIT_CTL_PWR_EVT_EN	| \
			RTIT_CTL_FUP_ON_PTW	| \
			RTIT_CTL_PTW_EN)
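
/*
 * Illustrative check (not driver code): pt_event_valid() below first
 * rejects any attr.config with bits outside PT_CONFIG_MASK. For example,
 * assuming a config that sets cyc (bit 1) and psb_period = 3 (bits 24-27),
 * the requested period index 3 is then tested against the psb_periods
 * capability bitmask: allowed & BIT(3) must be non-zero for the event
 * to be accepted. The concrete config value is hypothetical.
 */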

static bool pt_event_valid(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 allowed, requested;

	if ((config & PT_CONFIG_MASK) != config)
		return false;

	if (config & RTIT_CTL_CYC_PSB) {
		if (!intel_pt_validate_hw_cap(PT_CAP_psb_cyc))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_psb_periods);
		requested = (config & RTIT_CTL_PSB_FREQ) >>
			RTIT_CTL_PSB_FREQ_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_cycle_thresholds);
		requested = (config & RTIT_CTL_CYC_THRESH) >>
			RTIT_CTL_CYC_THRESH_OFFSET;
		if (requested && (!(allowed & BIT(requested))))
			return false;
	}

	if (config & RTIT_CTL_MTC) {
		/*
		 * In the unlikely case that CPUID lists valid mtc periods,
		 * but not the mtc capability, drop out here.
		 *
		 * Spec says that setting mtc period bits while mtc bit in
		 * CPUID is 0 will #GP, so better safe than sorry.
		 */
		if (!intel_pt_validate_hw_cap(PT_CAP_mtc))
			return false;

		allowed = intel_pt_validate_hw_cap(PT_CAP_mtc_periods);
		if (!allowed)
			return false;

		requested = (config & RTIT_CTL_MTC_RANGE) >>
			RTIT_CTL_MTC_RANGE_OFFSET;

		if (!(allowed & BIT(requested)))
			return false;
	}

	if (config & RTIT_CTL_PWR_EVT_EN &&
	    !intel_pt_validate_hw_cap(PT_CAP_power_event_trace))
		return false;

	if (config & RTIT_CTL_PTW) {
		if (!intel_pt_validate_hw_cap(PT_CAP_ptwrite))
			return false;

		/* FUPonPTW without PTW doesn't make sense */
		if ((config & RTIT_CTL_FUP_ON_PTW) &&
		    !(config & RTIT_CTL_PTW_EN))
			return false;
	}

	/*
	 * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
	 * clears the assumption that BranchEn must always be enabled,
	 * as was the case with the first implementation of PT.
	 * If this bit is not set, the legacy behavior is preserved
	 * for compatibility with older userspace.
	 *
	 * Re-using bit 0 for this purpose is fine because it is never
	 * directly set by the user; previous attempts at setting it in
	 * the attr.config resulted in -EINVAL.
	 */
	if (config & RTIT_CTL_PASSTHROUGH) {
		/*
		 * Disallow not setting BRANCH_EN where BRANCH_EN is
		 * always required.
		 */
		if (pt_pmu.branch_en_always_on &&
		    !(config & RTIT_CTL_BRANCH_EN))
			return false;
	} else {
		/*
		 * Disallow BRANCH_EN without the PASSTHROUGH.
		 */
		if (config & RTIT_CTL_BRANCH_EN)
			return false;
	}

	return true;
}

/*
 * PT configuration helpers
 * These are all CPU-affine and operate on the local PT
 */

static void pt_config_start(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 ctl = event->hw.config;

	ctl |= RTIT_CTL_TRACEEN;
	if (READ_ONCE(pt->vmx_on))
		perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
	else
		wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);
}

/* Address ranges and their corresponding msr configuration registers */
static const struct pt_address_range {
	unsigned long	msr_a;
	unsigned long	msr_b;
	unsigned int	reg_off;
} pt_address_ranges[] = {
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR0_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR0_B,
		.reg_off = RTIT_CTL_ADDR0_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR1_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR1_B,
		.reg_off = RTIT_CTL_ADDR1_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR2_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR2_B,
		.reg_off = RTIT_CTL_ADDR2_OFFSET,
	},
	{
		.msr_a	 = MSR_IA32_RTIT_ADDR3_A,
		.msr_b	 = MSR_IA32_RTIT_ADDR3_B,
		.reg_off = RTIT_CTL_ADDR3_OFFSET,
	}
};

static u64 pt_config_filters(struct perf_event *event)
{
	struct pt_filters *filters = event->hw.addr_filters;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	unsigned int range = 0;
	u64 rtit_ctl = 0;

	if (!filters)
		return 0;

	perf_event_addr_filters_sync(event);

	for (range = 0; range < filters->nr_filters; range++) {
		struct pt_filter *filter = &filters->filter[range];

		/*
		 * Note, if the range has zero start/end addresses due
		 * to its dynamic object not being loaded yet, we just
		 * go ahead and program a zeroed range, which will simply
		 * produce no data. Note^2: if executable code at 0x0
		 * is a concern, we can set up an "invalid" configuration
		 * such as msr_b < msr_a.
		 */

		/* avoid redundant msr writes */
		if (pt->filters.filter[range].msr_a != filter->msr_a) {
			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
			pt->filters.filter[range].msr_a = filter->msr_a;
		}

		if (pt->filters.filter[range].msr_b != filter->msr_b) {
			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
			pt->filters.filter[range].msr_b = filter->msr_b;
		}

		rtit_ctl |= (u64)filter->config << pt_address_ranges[range].reg_off;
	}

	return rtit_ctl;
}
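
/*
 * Mapping sketch (illustrative): for range 1, filter->config (the ADDRn_CFG
 * value, e.g. 1 for a filter range or 2 for a stop range per the SDM's
 * encoding) is shifted into RTIT_CTL at RTIT_CTL_ADDR1_OFFSET, while the
 * range bounds go to MSR_IA32_RTIT_ADDR1_A/B. The chosen range index and
 * config values here are examples only.
 */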

static void pt_config(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 reg;

	/* First round: clear STATUS, in particular the PSB byte counter. */
	if (!event->hw.config) {
		perf_event_itrace_started(event);
		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
	}

	reg = pt_config_filters(event);
	reg |= RTIT_CTL_TRACEEN;
	if (!buf->single)
		reg |= RTIT_CTL_TOPA;

	/*
	 * Previously, we had BRANCH_EN on by default, but now that PT has
	 * grown features outside of branch tracing, it is useful to allow
	 * the user to disable it. Setting bit 0 in the event's attr.config
	 * allows BRANCH_EN to pass through instead of being always on. See
	 * also the comment in pt_event_valid().
	 */
	if (event->attr.config & BIT(0)) {
		reg |= event->attr.config & RTIT_CTL_BRANCH_EN;
	} else {
		reg |= RTIT_CTL_BRANCH_EN;
	}

	if (!event->attr.exclude_kernel)
		reg |= RTIT_CTL_OS;
	if (!event->attr.exclude_user)
		reg |= RTIT_CTL_USR;

	reg |= (event->attr.config & PT_CONFIG_MASK);

	event->hw.config = reg;
	pt_config_start(event);
}

static void pt_config_stop(struct perf_event *event)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 ctl = READ_ONCE(event->hw.config);

	/* may be already stopped by a PMI */
	if (!(ctl & RTIT_CTL_TRACEEN))
		return;

	ctl &= ~RTIT_CTL_TRACEEN;
	if (!READ_ONCE(pt->vmx_on))
		wrmsrl(MSR_IA32_RTIT_CTL, ctl);

	WRITE_ONCE(event->hw.config, ctl);

	/*
	 * A wrmsr that disables trace generation serializes other PT
	 * registers and causes all data packets to be written to memory,
	 * but a fence is required for the data to become globally visible.
	 *
	 * The below WMB, separating the data store and the aux_head store,
	 * matches the consumer's RMB that separates the aux_head load and
	 * the data load.
	 */
	wmb();
}

/**
 * struct topa - ToPA metadata
 * @list:	linkage to struct pt_buffer's list of tables
 * @offset:	offset of the first entry in this table in the buffer
 * @size:	total size of all entries in this table
 * @last:	index of the last initialized entry in this table
 * @z_count:	how many times the first entry repeats
 */
struct topa {
	struct list_head	list;
	u64			offset;
	size_t			size;
	int			last;
	unsigned int		z_count;
};

/*
 * Keep ToPA table-related metadata on the same page as the actual table,
 * taking up a few words from the top
 */

#define TENTS_PER_PAGE	\
	((PAGE_SIZE - sizeof(struct topa)) / sizeof(struct topa_entry))

/**
 * struct topa_page - page-sized ToPA table with metadata at the top
 * @table:	actual ToPA table entries, as understood by PT hardware
 * @topa:	metadata
 */
struct topa_page {
	struct topa_entry	table[TENTS_PER_PAGE];
	struct topa		topa;
};
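
/*
 * Size check (illustrative): with 4KiB pages and 8-byte ToPA entries,
 * and assuming sizeof(struct topa) comes to roughly 40 bytes on x86_64,
 * this gives TENTS_PER_PAGE = (4096 - 40) / 8 = 507 entries, leaving the
 * struct topa metadata in the tail of the same page. Exact numbers depend
 * on PAGE_SIZE and structure padding.
 */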

static inline struct topa_page *topa_to_page(struct topa *topa)
{
	return container_of(topa, struct topa_page, topa);
}

static inline struct topa_page *topa_entry_to_page(struct topa_entry *te)
{
	return (struct topa_page *)((unsigned long)te & PAGE_MASK);
}

static inline phys_addr_t topa_pfn(struct topa *topa)
{
	return PFN_DOWN(virt_to_phys(topa_to_page(topa)));
}

/* make -1 stand for the last table entry */
#define TOPA_ENTRY(t, i)				\
	((i) == -1					\
		? &topa_to_page(t)->table[(t)->last]	\
		: &topa_to_page(t)->table[(i)])
#define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size))
#define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size)

static void pt_config_buffer(struct pt_buffer *buf)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	u64 reg, mask;
	void *base;

	if (buf->single) {
		base = buf->data_pages[0];
		mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7;
	} else {
		base = topa_to_page(buf->cur)->table;
		mask = (u64)buf->cur_idx;
	}

	reg = virt_to_phys(base);
	if (pt->output_base != reg) {
		pt->output_base = reg;
		wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
	}

	reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
	if (pt->output_mask != reg) {
		pt->output_mask = reg;
		wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
	}
}
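
/*
 * Register layout sketch (illustrative): for the single-range case with
 * nr_pages = 16, the mask is (16 * 4096 - 1) >> 7 = 0x1ff, so OUTPUT_MASK
 * becomes 0x7f | (0x1ff << 7) | ((u64)output_off << 32), i.e. bits 0-6
 * are always set, bits 7 and up hold the range mask (or the ToPA table
 * index in the multi-entry case), and bits 32-63 hold the offset within
 * the current output region. The page count is an example value.
 */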

/**
 * topa_alloc() - allocate page-sized ToPA table
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * Return:	On success, return the pointer to ToPA table page.
 */
static struct topa *topa_alloc(int cpu, gfp_t gfp)
{
	int node = cpu_to_node(cpu);
	struct topa_page *tp;
	struct page *p;

	p = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
	if (!p)
		return NULL;

	tp = page_address(p);
	tp->topa.last = 0;

	/*
	 * In case of single-entry ToPA, always put the self-referencing END
	 * link as the 2nd entry in the table
	 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
		TOPA_ENTRY(&tp->topa, 1)->end = 1;
	}

	return &tp->topa;
}

/**
 * topa_free() - free a page-sized ToPA table
 * @topa:	Table to deallocate.
 */
static void topa_free(struct topa *topa)
{
	free_page((unsigned long)topa);
}

/**
 * topa_insert_table() - insert a ToPA table into a buffer
 * @buf:	 PT buffer that's being extended.
 * @topa:	 New topa table to be inserted.
 *
 * If it's the first table in this buffer, set up buffer's pointers
 * accordingly; otherwise, add an END=1 link entry pointing to @topa
 * in the current "last" table and adjust the last table pointer to @topa.
 */
static void topa_insert_table(struct pt_buffer *buf, struct topa *topa)
{
	struct topa *last = buf->last;

	list_add_tail(&topa->list, &buf->tables);

	if (!buf->first) {
		buf->first = buf->last = buf->cur = topa;
		return;
	}

	topa->offset = last->offset + last->size;
	buf->last = topa;

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return;

	BUG_ON(last->last != TENTS_PER_PAGE - 1);

	TOPA_ENTRY(last, -1)->base = topa_pfn(topa);
	TOPA_ENTRY(last, -1)->end = 1;
}

/**
 * topa_table_full() - check if a ToPA table is filled up
 * @topa:	ToPA table.
 */
static bool topa_table_full(struct topa *topa)
{
	/* single-entry ToPA is a special case */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return !!topa->last;

	return topa->last == TENTS_PER_PAGE - 1;
}

/**
 * topa_insert_pages() - create a list of ToPA tables
 * @buf:	PT buffer being initialized.
 * @cpu:	CPU on which to allocate.
 * @gfp:	Allocation flags.
 *
 * This initializes a list of ToPA tables with entries from
 * the data_pages provided by rb_alloc_aux().
 *
 * Return:	0 on success or error code.
 */
static int topa_insert_pages(struct pt_buffer *buf, int cpu, gfp_t gfp)
{
	struct topa *topa = buf->last;
	int order = 0;
	struct page *p;

	p = virt_to_page(buf->data_pages[buf->nr_pages]);
	if (PagePrivate(p))
		order = page_private(p);

	if (topa_table_full(topa)) {
		topa = topa_alloc(cpu, gfp);
		if (!topa)
			return -ENOMEM;

		topa_insert_table(buf, topa);
	}

	if (topa->z_count == topa->last - 1) {
		if (order == TOPA_ENTRY(topa, topa->last - 1)->size)
			topa->z_count++;
	}

	TOPA_ENTRY(topa, -1)->base = page_to_phys(p) >> TOPA_SHIFT;
	TOPA_ENTRY(topa, -1)->size = order;
	if (!buf->snapshot &&
	    !intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(topa, -1)->intr = 1;
		TOPA_ENTRY(topa, -1)->stop = 1;
	}

	topa->last++;
	topa->size += sizes(order);

	buf->nr_pages += 1ul << order;

	return 0;
}

/**
 * pt_topa_dump() - print ToPA tables and their entries
 * @buf:	PT buffer.
 */
static void pt_topa_dump(struct pt_buffer *buf)
{
	struct topa *topa;

	list_for_each_entry(topa, &buf->tables, list) {
		struct topa_page *tp = topa_to_page(topa);
		int i;

		pr_debug("# table @%p, off %llx size %zx\n", tp->table,
			 topa->offset, topa->size);
		for (i = 0; i < TENTS_PER_PAGE; i++) {
			pr_debug("# entry @%p (%lx sz %u %c%c%c) raw=%16llx\n",
				 &tp->table[i],
				 (unsigned long)tp->table[i].base << TOPA_SHIFT,
				 sizes(tp->table[i].size),
				 tp->table[i].end ?  'E' : ' ',
				 tp->table[i].intr ? 'I' : ' ',
				 tp->table[i].stop ? 'S' : ' ',
				 *(u64 *)&tp->table[i]);
			if ((intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
			     tp->table[i].stop) ||
			    tp->table[i].end)
				break;
			if (!i && topa->z_count)
				i += topa->z_count;
		}
	}
}

/**
 * pt_buffer_advance() - advance to the next output region
 * @buf:	PT buffer.
 *
 * Advance the current pointers in the buffer to the next ToPA entry.
 */
static void pt_buffer_advance(struct pt_buffer *buf)
{
	buf->output_off = 0;
	buf->cur_idx++;

	if (buf->cur_idx == buf->cur->last) {
		if (buf->cur == buf->last)
			buf->cur = buf->first;
		else
			buf->cur = list_entry(buf->cur->list.next, struct topa,
					      list);
		buf->cur_idx = 0;
	}
}

/**
 * pt_update_head() - calculate current offsets and sizes
 * @pt:		Per-cpu pt context.
 *
 * Update buffer's current write pointer position and data size.
 */
static void pt_update_head(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	u64 topa_idx, base, old;

	if (buf->single) {
		local_set(&buf->data_size, buf->output_off);
		return;
	}

	/* offset of the first region in this table from the beginning of buf */
	base = buf->cur->offset + buf->output_off;

	/* offset of the current output region within this table */
	for (topa_idx = 0; topa_idx < buf->cur_idx; topa_idx++)
		base += TOPA_ENTRY_SIZE(buf->cur, topa_idx);

	if (buf->snapshot) {
		local_set(&buf->data_size, base);
	} else {
		old = (local64_xchg(&buf->head, base) &
		       ((buf->nr_pages << PAGE_SHIFT) - 1));
		if (base < old)
			base += buf->nr_pages << PAGE_SHIFT;

		local_add(base - old, &buf->data_size);
	}
}
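
/*
 * Arithmetic sketch (illustrative): with a 64KiB buffer (16 pages),
 * suppose the previous head masks down to old = 0xf000 and the new
 * position computes to base = 0x0200 after a wrap. Since base < old,
 * base is bumped by nr_pages << PAGE_SHIFT (0x10000) to 0x10200, and
 * data_size grows by 0x10200 - 0xf000 = 0x1200 bytes. The values are
 * made up purely to show the wraparound handling.
 */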

/**
 * pt_buffer_region() - obtain current output region's address
 * @buf:	PT buffer.
 */
static void *pt_buffer_region(struct pt_buffer *buf)
{
	return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
}

/**
 * pt_buffer_region_size() - obtain current output region's size
 * @buf:	PT buffer.
 */
static size_t pt_buffer_region_size(struct pt_buffer *buf)
{
	return TOPA_ENTRY_SIZE(buf->cur, buf->cur_idx);
}

/**
 * pt_handle_status() - take care of possible status conditions
 * @pt:		Per-cpu pt context.
 */
static void pt_handle_status(struct pt *pt)
{
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	int advance = 0;
	u64 status;

	rdmsrl(MSR_IA32_RTIT_STATUS, status);

	if (status & RTIT_STATUS_ERROR) {
		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
		pt_topa_dump(buf);
		status &= ~RTIT_STATUS_ERROR;
	}

	if (status & RTIT_STATUS_STOPPED) {
		status &= ~RTIT_STATUS_STOPPED;

		/*
		 * On systems that only do single-entry ToPA, hitting STOP
		 * means we are already losing data; need to let the decoder
		 * know.
		 */
		if (!buf->single &&
		    (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
		     buf->output_off == pt_buffer_region_size(buf))) {
			perf_aux_output_flag(&pt->handle,
					     PERF_AUX_FLAG_TRUNCATED);
			advance++;
		}
	}

	/*
	 * Also on single-entry ToPA implementations, the interrupt will come
	 * before the output reaches its output region's boundary.
	 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) &&
	    !buf->snapshot &&
	    pt_buffer_region_size(buf) - buf->output_off <= TOPA_PMI_MARGIN) {
		void *head = pt_buffer_region(buf);

		/* everything within this margin needs to be zeroed out */
		memset(head + buf->output_off, 0,
		       pt_buffer_region_size(buf) -
		       buf->output_off);
		advance++;
	}

	if (advance)
		pt_buffer_advance(buf);

	wrmsrl(MSR_IA32_RTIT_STATUS, status);
}

/**
 * pt_read_offset() - translate registers into buffer pointers
 * @buf:	PT buffer.
 *
 * Set buffer's output pointers from MSR values.
 */
static void pt_read_offset(struct pt_buffer *buf)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct topa_page *tp;

	if (!buf->single) {
		rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
		tp = phys_to_virt(pt->output_base);
		buf->cur = &tp->topa;
	}

	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
	/* offset within current output region */
	buf->output_off = pt->output_mask >> 32;
	/* index of current output region within this table */
	if (!buf->single)
		buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7;
}

static struct topa_entry *
pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
{
	struct topa_page *tp;
	struct topa *topa;
	unsigned int idx, cur_pg = 0, z_pg = 0, start_idx = 0;

	/*
	 * Indicates a bug in the caller.
	 */
	if (WARN_ON_ONCE(pg >= buf->nr_pages))
		return NULL;

	/*
	 * First, find the ToPA table where @pg fits. With high
	 * order allocations, there shouldn't be many of these.
	 */
	list_for_each_entry(topa, &buf->tables, list) {
		if (topa->offset + topa->size > pg << PAGE_SHIFT)
			goto found;
	}

	/*
	 * Hitting this means we have a problem in the ToPA
	 * allocation code.
	 */
	WARN_ON_ONCE(1);

	return NULL;

found:
	/*
	 * Indicates a problem in the ToPA allocation code.
	 */
	if (WARN_ON_ONCE(topa->last == -1))
		return NULL;

	tp = topa_to_page(topa);
	cur_pg = PFN_DOWN(topa->offset);
	if (topa->z_count) {
		z_pg = TOPA_ENTRY_PAGES(topa, 0) * (topa->z_count + 1);
		start_idx = topa->z_count + 1;
	}

	/*
	 * Multiple entries at the beginning of the table have the same size,
	 * ideally all of them; if @pg falls there, the search is done.
	 */
	if (pg >= cur_pg && pg < cur_pg + z_pg) {
		idx = (pg - cur_pg) / TOPA_ENTRY_PAGES(topa, 0);
		return &tp->table[idx];
	}

	/*
	 * Otherwise, slow path: iterate through the remaining entries.
	 */
	for (idx = start_idx, cur_pg += z_pg; idx < topa->last; idx++) {
		if (cur_pg + TOPA_ENTRY_PAGES(topa, idx) > pg)
			return &tp->table[idx];

		cur_pg += TOPA_ENTRY_PAGES(topa, idx);
	}

	/*
	 * Means we couldn't find a ToPA entry in this table that matches @pg.
	 */
	WARN_ON_ONCE(1);

	return NULL;
}
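
/*
 * Walk-through (illustrative): assume a table whose first four entries
 * each cover one page (z_count = 3) starting at buffer page 0. A lookup
 * for pg = 2 hits the fast path: z_pg = 1 * 4 = 4, so pg falls in the
 * identical run and idx = (2 - 0) / 1 = 2 returns tp->table[2] without
 * iterating. A lookup past the run falls through to the entry-by-entry
 * loop. The entry sizes and page numbers here are hypothetical.
 */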

static struct topa_entry *
pt_topa_prev_entry(struct pt_buffer *buf, struct topa_entry *te)
{
	unsigned long table = (unsigned long)te & ~(PAGE_SIZE - 1);
	struct topa_page *tp;
	struct topa *topa;

	tp = (struct topa_page *)table;
	if (tp->table != te)
		return --te;

	topa = &tp->topa;
	if (topa == buf->first)
		topa = buf->last;
	else
		topa = list_prev_entry(topa, list);

	tp = topa_to_page(topa);

	return &tp->table[topa->last - 1];
}

/**
 * pt_buffer_reset_markers() - place interrupt and stop bits in the buffer
 * @buf:	PT buffer.
 * @handle:	Current output handle.
 *
 * Place INT and STOP marks to prevent overwriting old data that the consumer
 * hasn't yet collected and waking up the consumer after a certain fraction of
 * the buffer has filled up. Only needed and sensible for non-snapshot counters.
 *
 * This obviously relies on buf::head to figure out buffer markers, so it has
 * to be called after pt_buffer_reset_offsets() and before the hardware tracing
 * is enabled.
 */
static int pt_buffer_reset_markers(struct pt_buffer *buf,
				   struct perf_output_handle *handle)
{
	unsigned long head = local64_read(&buf->head);
	unsigned long idx, npages, wakeup;

	if (buf->single)
		return 0;

	/* can't stop in the middle of an output region */
	if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) {
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
		return -EINVAL;
	}

	/* single entry ToPA is handled by marking all regions STOP=1 INT=1 */
	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		return 0;

	/* clear STOP and INT from current entry */
	if (buf->stop_te) {
		buf->stop_te->stop = 0;
		buf->stop_te->intr = 0;
	}

	if (buf->intr_te)
		buf->intr_te->intr = 0;

	/* how many pages till the STOP marker */
	npages = handle->size >> PAGE_SHIFT;

	/* if it's on a page boundary, fill up one more page */
	if (!offset_in_page(head + handle->size + 1))
		npages++;

	idx = (head >> PAGE_SHIFT) + npages;
	idx &= buf->nr_pages - 1;

	if (idx != buf->stop_pos) {
		buf->stop_pos = idx;
		buf->stop_te = pt_topa_entry_for_page(buf, idx);
		buf->stop_te = pt_topa_prev_entry(buf, buf->stop_te);
	}

	wakeup = handle->wakeup >> PAGE_SHIFT;

	/* in the worst case, wake up the consumer one page before hard stop */
	idx = (head >> PAGE_SHIFT) + npages - 1;
	if (idx > wakeup)
		idx = wakeup;

	idx &= buf->nr_pages - 1;
	if (idx != buf->intr_pos) {
		buf->intr_pos = idx;
		buf->intr_te = pt_topa_entry_for_page(buf, idx);
		buf->intr_te = pt_topa_prev_entry(buf, buf->intr_te);
	}

	buf->stop_te->stop = 1;
	buf->stop_te->intr = 1;
	buf->intr_te->intr = 1;

	return 0;
}
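
/*
 * Placement sketch (illustrative): with aux_head at page 5 and
 * handle->size spanning 3 pages in a 16-page buffer, the STOP marker
 * lands on the entry preceding page (5 + 3) & 15 = 8, and the INT
 * marker goes at most one page earlier, clamped by handle->wakeup.
 * The page numbers are invented to show the modular arithmetic only.
 */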

/**
 * pt_buffer_reset_offsets() - adjust buffer's write pointers from aux_head
 * @buf:	PT buffer.
 * @head:	Write pointer (aux_head) from AUX buffer.
 *
 * Find the ToPA table and entry corresponding to given @head and set buffer's
 * "current" pointers accordingly. This is done after we have obtained the
 * current aux_head position from a successful call to perf_aux_output_begin()
 * to make sure the hardware is writing to the right place.
 *
 * This function modifies buf::{cur,cur_idx,output_off} that will be programmed
 * into PT msrs when the tracing is enabled and buf::head and buf::data_size,
 * which are used to determine INT and STOP markers' locations by a subsequent
 * call to pt_buffer_reset_markers().
 */
static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
{
	struct topa_page *cur_tp;
	struct topa_entry *te;
	int pg;

	if (buf->snapshot)
		head &= (buf->nr_pages << PAGE_SHIFT) - 1;

	if (!buf->single) {
		pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1);
		te = pt_topa_entry_for_page(buf, pg);

		cur_tp = topa_entry_to_page(te);
		buf->cur = &cur_tp->topa;
		buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0);
		buf->output_off = head & (pt_buffer_region_size(buf) - 1);
	} else {
		buf->output_off = head;
	}

	local64_set(&buf->head, head);
	local_set(&buf->data_size, 0);
}
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun /**
1172*4882a593Smuzhiyun  * pt_buffer_fini_topa() - deallocate ToPA structure of a buffer
1173*4882a593Smuzhiyun  * @buf:	PT buffer.
1174*4882a593Smuzhiyun  */
pt_buffer_fini_topa(struct pt_buffer * buf)1175*4882a593Smuzhiyun static void pt_buffer_fini_topa(struct pt_buffer *buf)
1176*4882a593Smuzhiyun {
1177*4882a593Smuzhiyun 	struct topa *topa, *iter;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	if (buf->single)
1180*4882a593Smuzhiyun 		return;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	list_for_each_entry_safe(topa, iter, &buf->tables, list) {
1183*4882a593Smuzhiyun 		/*
1184*4882a593Smuzhiyun 		 * right now, this is in free_aux() path only, so
1185*4882a593Smuzhiyun 		 * no need to unlink this table from the list
1186*4882a593Smuzhiyun 		 */
1187*4882a593Smuzhiyun 		topa_free(topa);
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun /**
1192*4882a593Smuzhiyun  * pt_buffer_init_topa() - initialize ToPA table for pt buffer
1193*4882a593Smuzhiyun  * @buf:	PT buffer.
1194*4882a593Smuzhiyun  * @size:	Total size of all regions within this ToPA.
1195*4882a593Smuzhiyun  * @gfp:	Allocation flags.
1196*4882a593Smuzhiyun  */
pt_buffer_init_topa(struct pt_buffer * buf,int cpu,unsigned long nr_pages,gfp_t gfp)1197*4882a593Smuzhiyun static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu,
1198*4882a593Smuzhiyun 			       unsigned long nr_pages, gfp_t gfp)
1199*4882a593Smuzhiyun {
1200*4882a593Smuzhiyun 	struct topa *topa;
1201*4882a593Smuzhiyun 	int err;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	topa = topa_alloc(cpu, gfp);
1204*4882a593Smuzhiyun 	if (!topa)
1205*4882a593Smuzhiyun 		return -ENOMEM;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	topa_insert_table(buf, topa);
1208*4882a593Smuzhiyun 
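	/* Grow the ToPA, adding tables as needed, until @nr_pages are covered. */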
	while (buf->nr_pages < nr_pages) {
		err = topa_insert_pages(buf, cpu, gfp);
		if (err) {
			pt_buffer_fini_topa(buf);
			return -ENOMEM;
		}
	}

	/* link last table to the first one, unless we're double buffering */
	if (intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
		TOPA_ENTRY(buf->last, -1)->base = topa_pfn(buf->first);
		TOPA_ENTRY(buf->last, -1)->end = 1;
	}

	pt_topa_dump(buf);
	return 0;
}

static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages)
{
	struct page *p = virt_to_page(buf->data_pages[0]);
	int ret = -ENOTSUPP, order = 0;

	/*
	 * We can use single range output mode
	 * + in snapshot mode, where we don't need interrupts;
	 * + if the hardware supports it;
	 * + if the entire buffer is one contiguous allocation.
	 */
	if (!buf->snapshot)
		goto out;

	if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output))
		goto out;

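	/*
	 * For high-order AUX allocations, the perf ring-buffer code stashes
	 * the allocation order in page_private() of the first page.
	 */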
	if (PagePrivate(p))
		order = page_private(p);

	if (1 << order != nr_pages)
		goto out;

	/*
	 * Some processors cannot always support single range for more than
	 * 4KB - see errata TGL052, ADL037 and RPL017. Future processors might
	 * also be affected, so for now rather than trying to keep track of
	 * which ones, just disable it for all.
	 */
	if (nr_pages > 1)
		goto out;

	buf->single = true;
	buf->nr_pages = nr_pages;
	ret = 0;
out:
	return ret;
}

/**
 * pt_buffer_setup_aux() - set up topa tables for a PT buffer
 * @event:	Performance event.
 * @pages:	Array of pointers to buffer pages passed from perf core.
 * @nr_pages:	Number of pages in the buffer.
 * @snapshot:	If this is a snapshot/overwrite counter.
 *
 * This is a pmu::setup_aux callback that sets up ToPA tables and all the
 * bookkeeping for an AUX buffer.
 *
 * Return:	Our private PT buffer structure.
 */
static void *
pt_buffer_setup_aux(struct perf_event *event, void **pages,
		    int nr_pages, bool snapshot)
{
	struct pt_buffer *buf;
	int node, ret, cpu = event->cpu;

	if (!nr_pages)
		return NULL;

	/*
	 * Only support AUX sampling in snapshot mode, where we don't
	 * generate NMIs.
	 */
	if (event->attr.aux_sample_size && !snapshot)
		return NULL;

	if (cpu == -1)
		cpu = raw_smp_processor_id();
	node = cpu_to_node(cpu);

	buf = kzalloc_node(sizeof(struct pt_buffer), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->snapshot = snapshot;
	buf->data_pages = pages;
	buf->stop_pos = -1;
	buf->intr_pos = -1;

	INIT_LIST_HEAD(&buf->tables);

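	/*
	 * Prefer the cheaper single-range output mode when the buffer
	 * qualifies; otherwise fall back to building ToPA tables.
	 */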
	ret = pt_buffer_try_single(buf, nr_pages);
	if (!ret)
		return buf;

	ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL);
	if (ret) {
		kfree(buf);
		return NULL;
	}

	return buf;
}

/**
 * pt_buffer_free_aux() - perf AUX deallocation path callback
 * @data:	PT buffer.
 */
static void pt_buffer_free_aux(void *data)
{
	struct pt_buffer *buf = data;

	pt_buffer_fini_topa(buf);
	kfree(buf);
}

static int pt_addr_filters_init(struct perf_event *event)
{
	struct pt_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	if (!intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
		return 0;

	filters = kzalloc_node(sizeof(struct pt_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void pt_addr_filters_fini(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

#ifdef CONFIG_X86_64
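/*
 * Sign-extend bit (vaddr_bits - 1) of @vaddr through bit 63: shifting
 * left and then arithmetically right yields the canonical form.
 */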
static u64 canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
}

static u64 is_canonical_address(u64 vaddr, u8 vaddr_bits)
{
	return canonical_address(vaddr, vaddr_bits) == vaddr;
}

/* Clamp to a canonical address greater-than-or-equal-to the address given */
static u64 clamp_to_ge_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
	return is_canonical_address(vaddr, vaddr_bits) ?
	       vaddr :
	       -BIT_ULL(vaddr_bits - 1);
}

/* Clamp to a canonical address less-than-or-equal-to the address given */
static u64 clamp_to_le_canonical_addr(u64 vaddr, u8 vaddr_bits)
{
	return is_canonical_address(vaddr, vaddr_bits) ?
	       vaddr :
	       BIT_ULL(vaddr_bits - 1) - 1;
}
#else
#define clamp_to_ge_canonical_addr(x, y) (x)
#define clamp_to_le_canonical_addr(x, y) (x)
#endif

static int pt_event_addr_filters_validate(struct list_head *filters)
{
	struct perf_addr_filter *filter;
	int range = 0;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * PT doesn't support single address triggers and
		 * 'start' filters.
		 */
		if (!filter->size ||
		    filter->action == PERF_ADDR_FILTER_ACTION_START)
			return -EOPNOTSUPP;

		if (++range > intel_pt_validate_hw_cap(PT_CAP_num_address_ranges))
			return -EOPNOTSUPP;
	}

	return 0;
}

static void pt_event_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long msr_a, msr_b;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct pt_filters *filters = event->hw.addr_filters;
	struct perf_addr_filter *filter;
	int range = 0;

	if (!filters)
		return;

	list_for_each_entry(filter, &head->list, entry) {
		if (filter->path.dentry && !fr[range].start) {
			msr_a = msr_b = 0;
		} else {
			unsigned long n = fr[range].size - 1;
			unsigned long a = fr[range].start;
			unsigned long b;

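			/* Saturate rather than wrap past the top of the address space. */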
			if (a > ULONG_MAX - n)
				b = ULONG_MAX;
			else
				b = a + n;
			/*
			 * Apply the offset. 64-bit addresses written to the
			 * MSRs must be canonical, but the range can encompass
			 * non-canonical addresses. Since software cannot
			 * execute at non-canonical addresses, adjusting to
			 * canonical addresses does not affect the result of the
			 * address filter.
			 */
			msr_a = clamp_to_ge_canonical_addr(a, boot_cpu_data.x86_virt_bits);
			msr_b = clamp_to_le_canonical_addr(b, boot_cpu_data.x86_virt_bits);
			if (msr_b < msr_a)
				msr_a = msr_b = 0;
		}

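		/* ADDRn_CFG: 1 == trace only within this range, 2 == TraceStop on it. */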
		filters->filter[range].msr_a  = msr_a;
		filters->filter[range].msr_b  = msr_b;
		if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER)
			filters->filter[range].config = 1;
		else
			filters->filter[range].config = 2;
		range++;
	}

	filters->nr_filters = range;
}

/**
 * intel_pt_interrupt() - PT PMI handler
 */
void intel_pt_interrupt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;
	struct perf_event *event = pt->handle.event;

	/*
	 * There may be a dangling PT bit in the interrupt status register
	 * after PT has been disabled by pt_event_stop(). Make sure we don't
	 * do anything (particularly, re-enable) for this event here.
	 */
	if (!READ_ONCE(pt->handle_nmi))
		return;

	if (!event)
		return;

	pt_config_stop(event);

	buf = perf_get_aux(&pt->handle);
	if (!buf)
		return;

	pt_read_offset(buf);

	pt_handle_status(pt);

	pt_update_head(pt);

	perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));

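	/* If the event hasn't been stopped, grab fresh AUX space and re-enable. */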
	if (!event->hw.state) {
		int ret;

		buf = perf_aux_output_begin(&pt->handle, event);
		if (!buf) {
			event->hw.state = PERF_HES_STOPPED;
			return;
		}

		pt_buffer_reset_offsets(buf, pt->handle.head);
		/* snapshot counters don't use PMI, so it's safe */
		ret = pt_buffer_reset_markers(buf, &pt->handle);
		if (ret) {
			perf_aux_output_end(&pt->handle, 0);
			return;
		}

		pt_config_buffer(buf);
		pt_config_start(event);
	}
}

void intel_pt_handle_vmx(int on)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct perf_event *event;
	unsigned long flags;

	/* PT plays nice with VMX, do nothing */
	if (pt_pmu.vmx)
		return;

	/*
	 * VMXON will clear RTIT_CTL.TraceEn; we need to make
	 * sure to not try to set it while VMX is on. Disable
	 * interrupts to avoid racing with pmu callbacks;
	 * concurrent PMI should be handled fine.
	 */
	local_irq_save(flags);
	WRITE_ONCE(pt->vmx_on, on);

	/*
	 * If an AUX transaction is in progress, it will contain
	 * gap(s), so flag it PARTIAL to inform the user.
	 */
	event = pt->handle.event;
	if (event)
		perf_aux_output_flag(&pt->handle,
				     PERF_AUX_FLAG_PARTIAL);

	/* Turn PT back on upon VMXOFF */
	if (!on && event)
		wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);

/*
 * PMU callbacks
 */

static void pt_event_start(struct perf_event *event, int mode)
{
	struct hw_perf_event *hwc = &event->hw;
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf;

	buf = perf_aux_output_begin(&pt->handle, event);
	if (!buf)
		goto fail_stop;

	pt_buffer_reset_offsets(buf, pt->handle.head);
	if (!buf->snapshot) {
		if (pt_buffer_reset_markers(buf, &pt->handle))
			goto fail_end_stop;
	}

	WRITE_ONCE(pt->handle_nmi, 1);
	hwc->state = 0;

	pt_config_buffer(buf);
	pt_config(event);

	return;

fail_end_stop:
	perf_aux_output_end(&pt->handle, 0);
fail_stop:
	hwc->state = PERF_HES_STOPPED;
}

static void pt_event_stop(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	/*
	 * Protect against the PMI racing with disabling wrmsr,
	 * see comment in intel_pt_interrupt().
	 */
	WRITE_ONCE(pt->handle_nmi, 0);

	pt_config_stop(event);

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		struct pt_buffer *buf = perf_get_aux(&pt->handle);

		if (!buf)
			return;

		if (WARN_ON_ONCE(pt->handle.event != event))
			return;

		pt_read_offset(buf);

		pt_handle_status(pt);

		pt_update_head(pt);

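		/*
		 * Snapshot counters expose the whole buffer: head becomes the
		 * old data_size, while data_size is primed to the full buffer
		 * size for the perf_aux_output_end() below.
		 */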
		if (buf->snapshot)
			pt->handle.head =
				local_xchg(&buf->data_size,
					   buf->nr_pages << PAGE_SHIFT);
		perf_aux_output_end(&pt->handle, local_xchg(&buf->data_size, 0));
	}
}

static long pt_event_snapshot_aux(struct perf_event *event,
				  struct perf_output_handle *handle,
				  unsigned long size)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct pt_buffer *buf = perf_get_aux(&pt->handle);
	unsigned long from = 0, to;
	long ret;

	if (WARN_ON_ONCE(!buf))
		return 0;

	/*
	 * Sampling is only allowed on snapshot events;
	 * see pt_buffer_setup_aux().
	 */
	if (WARN_ON_ONCE(!buf->snapshot))
		return 0;

	/*
	 * Here, handle_nmi tells us if the tracing is on
	 */
	if (READ_ONCE(pt->handle_nmi))
		pt_config_stop(event);

	pt_read_offset(buf);
	pt_update_head(pt);

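	/*
	 * Copy out the last @size bytes ending at the current head,
	 * wrapping backwards over the end of the buffer if necessary.
	 */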
	to = local_read(&buf->data_size);
	if (to < size)
		from = buf->nr_pages << PAGE_SHIFT;
	from += to - size;

	ret = perf_output_copy_aux(&pt->handle, handle, from, to);

	/*
	 * If the tracing was on when we turned up, restart it.
	 * Compiler barrier not needed as we couldn't have been
	 * preempted by anything that touches pt->handle_nmi.
	 */
	if (pt->handle_nmi)
		pt_config_start(event);

	return ret;
}

static void pt_event_del(struct perf_event *event, int mode)
{
	pt_event_stop(event, PERF_EF_UPDATE);
}

static int pt_event_add(struct perf_event *event, int mode)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);
	struct hw_perf_event *hwc = &event->hw;
	int ret = -EBUSY;

	if (pt->handle.event)
		goto fail;

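	/* Start now if asked to; otherwise leave the event in the stopped state. */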
	if (mode & PERF_EF_START) {
		pt_event_start(event, 0);
		ret = -EINVAL;
		if (hwc->state == PERF_HES_STOPPED)
			goto fail;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	ret = 0;
fail:

	return ret;
}

static void pt_event_read(struct perf_event *event)
{
}

static void pt_event_destroy(struct perf_event *event)
{
	pt_addr_filters_fini(event);
	x86_del_exclusive(x86_lbr_exclusive_pt);
}

static int pt_event_init(struct perf_event *event)
{
	if (event->attr.type != pt_pmu.pmu.type)
		return -ENOENT;

	if (!pt_event_valid(event))
		return -EINVAL;

	if (x86_add_exclusive(x86_lbr_exclusive_pt))
		return -EBUSY;

	if (pt_addr_filters_init(event)) {
		x86_del_exclusive(x86_lbr_exclusive_pt);
		return -ENOMEM;
	}

	event->destroy = pt_event_destroy;

	return 0;
}

void cpu_emergency_stop_pt(void)
{
	struct pt *pt = this_cpu_ptr(&pt_ctx);

	if (pt->handle.event)
		pt_event_stop(pt->handle.event, PERF_EF_UPDATE);
}

int is_intel_pt_event(struct perf_event *event)
{
	return event->pmu == &pt_pmu.pmu;
}

static __init int pt_init(void)
{
	int ret, cpu, prior_warn = 0;

	BUILD_BUG_ON(sizeof(struct topa) > PAGE_SIZE);

	if (!boot_cpu_has(X86_FEATURE_INTEL_PT))
		return -ENODEV;

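	/*
	 * If PT is already enabled by some other agent (e.g. firmware or a
	 * hypervisor), don't touch it: take the exclusive slot so perf stays
	 * away from the PT MSRs, and bail out.
	 */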
	get_online_cpus();
	for_each_online_cpu(cpu) {
		u64 ctl;

		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
		if (!ret && (ctl & RTIT_CTL_TRACEEN))
			prior_warn++;
	}
	put_online_cpus();

	if (prior_warn) {
		x86_add_exclusive(x86_lbr_exclusive_pt);
		pr_warn("PT is enabled at boot time, doing nothing\n");

		return -EBUSY;
	}

	ret = pt_pmu_hw_init();
	if (ret)
		return ret;

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_output)) {
		pr_warn("ToPA output is not supported on this CPU\n");
		return -ENODEV;
	}

	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;

	pt_pmu.pmu.capabilities	|= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
	pt_pmu.pmu.attr_groups		 = pt_attr_groups;
	pt_pmu.pmu.task_ctx_nr		 = perf_sw_context;
	pt_pmu.pmu.event_init		 = pt_event_init;
	pt_pmu.pmu.add			 = pt_event_add;
	pt_pmu.pmu.del			 = pt_event_del;
	pt_pmu.pmu.start		 = pt_event_start;
	pt_pmu.pmu.stop			 = pt_event_stop;
	pt_pmu.pmu.snapshot_aux		 = pt_event_snapshot_aux;
	pt_pmu.pmu.read			 = pt_event_read;
	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
	pt_pmu.pmu.addr_filters_sync     = pt_event_addr_filters_sync;
	pt_pmu.pmu.addr_filters_validate = pt_event_addr_filters_validate;
	pt_pmu.pmu.nr_addr_filters       =
		intel_pt_validate_hw_cap(PT_CAP_num_address_ranges);

	ret = perf_pmu_register(&pt_pmu.pmu, "intel_pt", -1);

	return ret;
}
arch_initcall(pt_init);