// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <api/fs/fs.h>
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/coresight-pmu.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "cs-etm.h"
#include "../../util/debug.h"
#include "../../util/record.h"
#include "../../util/auxtrace.h"
#include "../../util/cpumap.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/perf_api_probe.h"
#include "../../util/evsel_config.h"
#include "../../util/pmu.h"
#include "../../util/cs-etm.h"
#include <internal/lib.h> // page_size
#include "../../util/session.h"

#include <errno.h>
#include <stdlib.h>
#include <sys/stat.h>

struct cs_etm_recording {
	struct auxtrace_record	itr;
	struct perf_pmu		*cs_etm_pmu;
	struct evlist		*evlist;
	int			wrapped_cnt;	/* entries in the wrapped array below */
	bool			*wrapped;	/* per-mmap "AUX buffer has wrapped" flags */
	bool			snapshot_mode;
	size_t			snapshot_size;
};

static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
	[CS_ETM_ETMCCER]	= "mgmt/etmccer",
	[CS_ETM_ETMIDR]		= "mgmt/etmidr",
};

static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
	[CS_ETMV4_TRCIDR0]		= "trcidr/trcidr0",
	[CS_ETMV4_TRCIDR1]		= "trcidr/trcidr1",
	[CS_ETMV4_TRCIDR2]		= "trcidr/trcidr2",
	[CS_ETMV4_TRCIDR8]		= "trcidr/trcidr8",
	[CS_ETMV4_TRCAUTHSTATUS]	= "mgmt/trcauthstatus",
};
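
/*
 * These paths are relative to the cs_etm PMU's sysfs directory, e.g.
 * /sys/bus/event_source/devices/cs_etm/cpu<N>/trcidr/trcidr0 on a typical
 * system (the exact layout is up to the CoreSight driver).
 */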

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);

static int cs_etm_set_context_id(struct auxtrace_record *itr,
				 struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR2 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bail out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR2.CIDSIZE, bits [9:5], indicates whether contextID tracing
	 * is supported:
	 *  0b00000 Context ID tracing is not supported.
	 *  0b00100 Maximum of 32-bit Context ID size.
	 *  All other values are reserved.
	 */
	val = BMVAL(val, 5, 9);
	if (val != 0x4) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
	err = 0;

out:
	return err;
}
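
/*
 * Worked example for the CIDSIZE check above (value purely illustrative):
 * a TRCIDR2 read of 0x88 has bits [9:5] = 0b00100 (0x4), so 32-bit context
 * ID tracing is available and ETM_OPT_CTXTID can be requested.
 */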

static int cs_etm_set_timestamp(struct auxtrace_record *itr,
				struct evsel *evsel, int cpu)
{
	struct cs_etm_recording *ptr;
	struct perf_pmu *cs_etm_pmu;
	char path[PATH_MAX];
	int err = -EINVAL;
	u32 val;

	ptr = container_of(itr, struct cs_etm_recording, itr);
	cs_etm_pmu = ptr->cs_etm_pmu;

	if (!cs_etm_is_etmv4(itr, cpu))
		goto out;

	/* Get a handle on TRCIDR0 */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* There was a problem reading the file, bail out */
	if (err != 1) {
		pr_err("%s: can't read file %s\n",
		       CORESIGHT_ETM_PMU_NAME, path);
		goto out;
	}

	/*
	 * TRCIDR0.TSSIZE, bits [28:24], indicates whether global timestamping
	 * is supported:
	 *  0b00000 Global timestamping is not implemented
	 *  0b00110 Implementation supports a maximum timestamp of 48 bits.
	 *  0b01000 Implementation supports a maximum timestamp of 64 bits.
	 */
	val &= GENMASK(28, 24);
	if (!val) {
		err = -EINVAL;
		goto out;
	}

	/* All good, let the kernel know */
	evsel->core.attr.config |= (1 << ETM_OPT_TS);
	err = 0;

out:
	return err;
}
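
/*
 * Worked example for the TSSIZE check above (value purely illustrative):
 * a TRCIDR0 read of 0x08000000 has bits [28:24] = 0b01000, i.e. 64-bit
 * timestamps are implemented, so ETM_OPT_TS can be requested.
 */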

static int cs_etm_set_option(struct auxtrace_record *itr,
			     struct evsel *evsel, u32 option)
{
	int i, err = -EINVAL;
	struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* Set option of each CPU we have */
	for (i = 0; i < cpu__max_cpu(); i++) {
		if (!cpu_map__has(event_cpus, i) ||
		    !cpu_map__has(online_cpus, i))
			continue;

		if (option & ETM_OPT_CTXTID) {
			err = cs_etm_set_context_id(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ETM_OPT_TS) {
			err = cs_etm_set_timestamp(itr, evsel, i);
			if (err)
				goto out;
		}
		if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS))
			/* Nothing else is currently supported */
			goto out;
	}

	err = 0;
out:
	perf_cpu_map__put(online_cpus);
	return err;
}

static int cs_etm_parse_snapshot_options(struct auxtrace_record *itr,
					 struct record_opts *opts,
					 const char *str)
{
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;
	ptr->snapshot_size = snapshot_size;

	return 0;
}
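
/*
 * Snapshot mode is requested with perf record's -S/--snapshot option, e.g.
 * "perf record -e cs_etm//u -S8192 -- <cmd>" (illustrative invocation) asks
 * for an 8192 byte snapshot; without a size the defaults computed in
 * cs_etm_recording_options() below apply.
 */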

static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
				struct evsel *evsel)
{
	char msg[BUFSIZ], path[PATH_MAX], *sink;
	struct evsel_config_term *term;
	int ret = -EINVAL;
	u32 hash;

	if (evsel->core.attr.config2 & GENMASK(31, 0))
		return 0;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
			continue;

		sink = term->val.str;
		snprintf(path, PATH_MAX, "sinks/%s", sink);

		ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
		if (ret != 1) {
			pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
			       sink, evsel__name(evsel), errno,
			       str_error_r(errno, msg, sizeof(msg)));
			return ret;
		}

		evsel->core.attr.config2 |= hash;
		return 0;
	}

	/*
	 * No sink was provided on the command line - allow the CoreSight
	 * system to look for a default
	 */
	return 0;
}
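
/*
 * The sink arrives as a config term on the event, e.g. (illustrative
 * invocation) "perf record -e cs_etm/@tmc_etr0/u -- <cmd>" selects the
 * tmc_etr0 sink; the matching "sinks/<name>" sysfs file supplies the hash
 * that is programmed into perf_event_attr::config2 above.
 */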

static int cs_etm_recording_options(struct auxtrace_record *itr,
				    struct evlist *evlist,
				    struct record_opts *opts)
{
	int ret;
	struct cs_etm_recording *ptr =
				container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evsel *evsel, *cs_etm_evsel = NULL;
	struct perf_cpu_map *cpus = evlist->core.cpus;
	bool privileged = perf_event_paranoid_check(-1);
	int err = 0;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	if (!record_opts__no_switch_events(opts) &&
	    perf_can_record_switch_events())
		opts->record_switch_events = true;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			if (cs_etm_evsel) {
				pr_err("There may be only one %s event\n",
				       CORESIGHT_ETM_PMU_NAME);
				return -EINVAL;
			}
			evsel->core.attr.freq = 0;
			evsel->core.attr.sample_period = 1;
			cs_etm_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	/* Nothing to do if no event of interest was found */
	if (!cs_etm_evsel)
		return 0;

	ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
	if (ret)
		return ret;

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with %s\n",
		       CORESIGHT_ETM_PMU_NAME);
		return -EINVAL;
	}

	/* We are in snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		/*
		 * No size was given to '-S' or '-m,', so go with
		 * the default
		 */
		if (!opts->auxtrace_snapshot_size &&
		    !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages =
							KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
						opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}

		/*
		 * '-m,xyz' was specified but no snapshot size, so make the
		 * snapshot size as big as the auxtrace mmap area.
		 */
		if (!opts->auxtrace_snapshot_size) {
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		}

		/*
		 * '-Sxyz' was specified but no auxtrace mmap area, so make the
		 * auxtrace mmap area big enough to fit the requested snapshot
		 * size.
		 */
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}

		/* Snapshot size can't be bigger than the auxtrace area */
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}

		/* Something went wrong somewhere - this shouldn't happen */
		if (!opts->auxtrace_snapshot_size ||
		    !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
	}

	/* We are in full trace mode but '-m,xyz' wasn't specified */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages provided by user */
	if (opts->auxtrace_mmap_pages) {
		unsigned int max_page = (KiB(128) / page_size);
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;

		if (!privileged &&
		    opts->auxtrace_mmap_pages > max_page) {
			opts->auxtrace_mmap_pages = max_page;
			pr_err("auxtrace too big, truncating to %d\n",
			       max_page);
		}

		if (!is_power_of_2(sz)) {
			pr_err("Invalid mmap size for %s: must be a power of 2\n",
			       CORESIGHT_ETM_PMU_NAME);
			return -EINVAL;
		}
	}

	if (opts->auxtrace_snapshot_mode)
		pr_debug2("%s snapshot size: %zu\n", CORESIGHT_ETM_PMU_NAME,
			  opts->auxtrace_snapshot_size);

	/*
	 * To obtain the auxtrace buffer file descriptor, the auxtrace
	 * event must come first.
	 */
	perf_evlist__to_front(evlist, cs_etm_evsel);

	/*
	 * In the case of per-cpu mmaps, we need the CPU on the
	 * AUX event.  We also need the contextID in order to be notified
	 * when a context switch happened.
	 */
	if (!perf_cpu_map__empty(cpus)) {
		evsel__set_sample_bit(cs_etm_evsel, CPU);

		err = cs_etm_set_option(itr, cs_etm_evsel,
					ETM_OPT_CTXTID | ETM_OPT_TS);
		if (err)
			goto out;
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			goto out;

		tracking_evsel = evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->core.attr.freq = 0;
		tracking_evsel->core.attr.sample_period = 1;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!perf_cpu_map__empty(cpus))
			evsel__set_sample_bit(tracking_evsel, TIME);
	}

out:
	return err;
}
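
/*
 * AUX buffer sizing policy applied above, summarised: privileged users
 * default to a 4MiB auxtrace mmap; unprivileged users are capped at 128KiB
 * of auxtrace mmap and, when still unset, get a 256KiB perf mmap.
 */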

static u64 cs_etm_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
	struct evlist *evlist = ptr->evlist;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == cs_etm_pmu->type) {
			/*
			 * Variable perf_event_attr::config is assigned to
			 * ETMv3/PTM.  The bit fields have been made to match
			 * the ETMv3.5 ETMCR register specification.  See the
			 * PMU_FORMAT_ATTR() declarations in
			 * drivers/hwtracing/coresight/coresight-etm-perf.c
			 * for details.
			 */
			config = evsel->core.attr.config;
			break;
		}
	}

	return config;
}

#ifndef BIT
#define BIT(N) (1UL << (N))
#endif

static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
{
	u64 config = 0;
	u64 config_opts = 0;

	/*
	 * The perf event variable config bits represent both
	 * the command line options and register programming
	 * bits in ETMv3/PTM. For ETMv4 we must remap options
	 * to real bits.
	 */
	config_opts = cs_etm_get_config(itr);
	if (config_opts & BIT(ETM_OPT_CYCACC))
		config |= BIT(ETM4_CFG_BIT_CYCACC);
	if (config_opts & BIT(ETM_OPT_CTXTID))
		config |= BIT(ETM4_CFG_BIT_CTXTID);
	if (config_opts & BIT(ETM_OPT_TS))
		config |= BIT(ETM4_CFG_BIT_TS);
	if (config_opts & BIT(ETM_OPT_RETSTK))
		config |= BIT(ETM4_CFG_BIT_RETSTK);

	return config;
}
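
/*
 * The ETM4_CFG_BIT_* values mirror fields of the ETMv4 TRCCONFIGR register,
 * so e.g. requesting ETM_OPT_TS on the command line ultimately sets the
 * TRCCONFIGR timestamp-enable bit when the driver programs the tracer.
 */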

static size_t
cs_etm_info_priv_size(struct auxtrace_record *itr,
		      struct evlist *evlist)
{
	int i;
	int etmv3 = 0, etmv4 = 0;
	struct perf_cpu_map *event_cpus = evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);

	/* cpu map is not empty, we have specific CPUs to work with */
	if (!perf_cpu_map__empty(event_cpus)) {
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(event_cpus, i) ||
			    !cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	} else {
		/* get configuration for all CPUs in the system */
		for (i = 0; i < cpu__max_cpu(); i++) {
			if (!cpu_map__has(online_cpus, i))
				continue;

			if (cs_etm_is_etmv4(itr, i))
				etmv4++;
			else
				etmv3++;
		}
	}

	perf_cpu_map__put(online_cpus);

	return (CS_ETM_HEADER_SIZE +
	       (etmv4 * CS_ETMV4_PRIV_SIZE) +
	       (etmv3 * CS_ETMV3_PRIV_SIZE));
}
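
/*
 * In other words: one session header plus one fixed-size block per traced
 * CPU.  Tracing two ETMv4 CPUs, for instance, needs CS_ETM_HEADER_SIZE +
 * 2 * CS_ETMV4_PRIV_SIZE bytes of private data.
 */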

static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu)
{
	bool ret = false;
	char path[PATH_MAX];
	int scan;
	unsigned int val;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* Take any of the RO files for ETMv4 and see if it is present */
	snprintf(path, PATH_MAX, "cpu%d/%s",
		 cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
	scan = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);

	/* The file was read successfully, we have a winner */
	if (scan == 1)
		ret = true;

	return ret;
}

static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
{
	char pmu_path[PATH_MAX];
	int scan;
	unsigned int val = 0;

	/* Get RO metadata from sysfs */
	snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);

	/* On failure @val stays 0, which is what gets returned */
	scan = perf_pmu__scan_file(pmu, pmu_path, "%x", &val);
	if (scan != 1)
		pr_err("%s: error reading: %s\n", __func__, pmu_path);

	return val;
}

static void cs_etm_get_metadata(int cpu, u32 *offset,
				struct auxtrace_record *itr,
				struct perf_record_auxtrace_info *info)
{
	u32 increment;
	u64 magic;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	/* first see what kind of tracer this cpu is affined to */
	if (cs_etm_is_etmv4(itr, cpu)) {
		magic = __perf_cs_etmv4_magic;
		/* Get trace configuration register */
		info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
						cs_etmv4_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETMV4_TRCIDR0] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
		info->priv[*offset + CS_ETMV4_TRCIDR1] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
		info->priv[*offset + CS_ETMV4_TRCIDR2] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
		info->priv[*offset + CS_ETMV4_TRCIDR8] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
		info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv4_ro
				      [CS_ETMV4_TRCAUTHSTATUS]);

		/* How much space was used */
		increment = CS_ETMV4_PRIV_MAX;
	} else {
		magic = __perf_cs_etmv3_magic;
		/* Get configuration register */
		info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
		/* Get traceID from the framework */
		info->priv[*offset + CS_ETM_ETMTRACEIDR] =
						coresight_get_trace_id(cpu);
		/* Get read-only information from sysfs */
		info->priv[*offset + CS_ETM_ETMCCER] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMCCER]);
		info->priv[*offset + CS_ETM_ETMIDR] =
			cs_etm_get_ro(cs_etm_pmu, cpu,
				      metadata_etmv3_ro[CS_ETM_ETMIDR]);

		/* How much space was used */
		increment = CS_ETM_PRIV_MAX;
	}

	/* Build generic header portion */
	info->priv[*offset + CS_ETM_MAGIC] = magic;
	info->priv[*offset + CS_ETM_CPU] = cpu;
	/* Where the next CPU entry should start from */
	*offset += increment;
}
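
/*
 * Each per-CPU block built above thus carries the generic (magic, cpu)
 * pair, the configuration and trace ID, then the read-only register
 * values - CS_ETMV4_PRIV_MAX (or CS_ETM_PRIV_MAX) u64 slots in total,
 * matching what cs_etm_info_priv_size() accounted for.
 */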

static int cs_etm_info_fill(struct auxtrace_record *itr,
			    struct perf_session *session,
			    struct perf_record_auxtrace_info *info,
			    size_t priv_size)
{
	int i;
	u32 offset;
	u64 nr_cpu, type;
	struct perf_cpu_map *cpu_map;
	struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
	struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;

	if (priv_size != cs_etm_info_priv_size(itr, session->evlist))
		return -EINVAL;

	if (!session->evlist->core.nr_mmaps)
		return -EINVAL;

	/* If the cpu_map is empty all online CPUs are involved */
	if (perf_cpu_map__empty(event_cpus)) {
		cpu_map = online_cpus;
	} else {
		/* Make sure all specified CPUs are online */
		for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
			if (cpu_map__has(event_cpus, i) &&
			    !cpu_map__has(online_cpus, i))
				return -EINVAL;
		}

		cpu_map = event_cpus;
	}

	nr_cpu = perf_cpu_map__nr(cpu_map);
	/* Get PMU type as dynamically assigned by the core */
	type = cs_etm_pmu->type;

	/* First fill out the session header */
	info->type = PERF_AUXTRACE_CS_ETM;
	info->priv[CS_HEADER_VERSION_0] = 0;
	info->priv[CS_PMU_TYPE_CPUS] = type << 32;
	info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
	info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;

	offset = CS_ETM_SNAPSHOT + 1;

	for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
		if (cpu_map__has(cpu_map, i))
			cs_etm_get_metadata(i, &offset, itr, info);

	perf_cpu_map__put(online_cpus);

	return 0;
}
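
/*
 * Layout of the PERF_RECORD_AUXTRACE_INFO private area filled in above:
 *   [CS_HEADER_VERSION_0] header version (0)
 *   [CS_PMU_TYPE_CPUS]    PMU type in the upper 32 bits, CPU count below
 *   [CS_ETM_SNAPSHOT]     snapshot mode flag
 * followed by one per-CPU metadata block from cs_etm_get_metadata().
 */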

static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
{
	bool *wrapped;
	int cnt = ptr->wrapped_cnt;

	/* Make @ptr->wrapped as big as @idx */
	while (cnt <= idx)
		cnt++;

	/*
	 * Freed in cs_etm_recording_free().  Using realloc() to avoid
	 * cross compilation problems where the host's system supports
	 * reallocarray() but not the target.
	 */
	wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
	if (!wrapped)
		return -ENOMEM;

	wrapped[cnt - 1] = false;
	ptr->wrapped_cnt = cnt;
	ptr->wrapped = wrapped;

	return 0;
}

static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
				      size_t buffer_size, u64 head)
{
	u64 i, watermark;
	u64 *buf = (u64 *)buffer;
	size_t buf_size = buffer_size;

	/*
	 * We want to look at the very last 512 bytes (chosen arbitrarily) of
	 * the ring buffer.
	 */
	watermark = buf_size - 512;

	/*
	 * @head is continuously increasing - if its value is equal to or
	 * greater than the size of the ring buffer, it has wrapped around.
	 */
	if (head >= buffer_size)
		return true;

	/*
	 * The value of @head is somewhere within the size of the ring buffer.
	 * This can be that there hasn't been enough data to fill the ring
	 * buffer yet or the trace time was so long that @head has numerically
	 * wrapped around.  To find out, we need to check if we have data at
	 * the very end of the ring buffer.  We can reliably do this because
	 * mmap'ed pages are zeroed out and there is a fresh mapping with
	 * every new session.
	 */

	/* @head is less than 512 bytes from the end of the ring buffer */
	if (head > watermark)
		watermark = head;

	/*
	 * Speed things up by using 64 bit transactions (see "u64 *buf" above)
	 */
	watermark >>= 3;
	buf_size >>= 3;

	/*
	 * If we find trace data at the end of the ring buffer, @head has
	 * been there and has numerically wrapped around at least once.
	 */
	for (i = watermark; i < buf_size; i++)
		if (buf[i])
			return true;

	return false;
}
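
/*
 * Illustrative example: with a 4KiB buffer and head == 100, the scan starts
 * at the 512-byte watermark (byte 3584); any non-zero u64 word in that tail
 * means @head must have wrapped around at least once.
 */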

static int cs_etm_find_snapshot(struct auxtrace_record *itr,
				int idx, struct auxtrace_mmap *mm,
				unsigned char *data,
				u64 *head, u64 *old)
{
	int err;
	bool wrapped;
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	/*
	 * Allocate memory to keep track of wrapping if this is the first
	 * time we deal with this *mm.
	 */
	if (idx >= ptr->wrapped_cnt) {
		err = cs_etm_alloc_wrapped_array(ptr, idx);
		if (err)
			return err;
	}

	/*
	 * Check to see if *head has wrapped around.  If it hasn't, only the
	 * amount of data between *head and *old is snapshot'ed to avoid
	 * bloating the perf.data file with zeros.  But as soon as *head has
	 * wrapped around, the entire size of the AUX ring buffer is taken.
	 */
	wrapped = ptr->wrapped[idx];
	if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
		wrapped = true;
		ptr->wrapped[idx] = true;
	}

	pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head, mm->len);

	/* No wrap has occurred, we can just use *head and *old. */
	if (!wrapped)
		return 0;

	/*
	 * *head has wrapped around - adjust *head and *old to pick up the
	 * entire content of the AUX buffer.
	 */
	if (*head >= mm->len) {
		*old = *head - mm->len;
	} else {
		*head += mm->len;
		*old = *head - mm->len;
	}

	return 0;
}
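
/*
 * Illustrative example: for mm->len == 4096 and *head == 5000 the buffer
 * has wrapped, so *old becomes 904 and the snapshot covers the full 4096
 * bytes ending at *head.
 */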

static int cs_etm_snapshot_start(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	/* Stop the tracer so the AUX buffer is stable while it is read */
	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__disable(evsel);
	}
	return -EINVAL;
}

static int cs_etm_snapshot_finish(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);
	struct evsel *evsel;

	/* Snapshot taken - resume tracing */
	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
			return evsel__enable(evsel);
	}
	return -EINVAL;
}

static u64 cs_etm_reference(struct auxtrace_record *itr __maybe_unused)
{
	/* Build a pseudo-random 64-bit reference from two rand() calls */
	return (((u64) rand() <<  0) & 0x00000000FFFFFFFFull) |
		(((u64) rand() << 32) & 0xFFFFFFFF00000000ull);
}

static void cs_etm_recording_free(struct auxtrace_record *itr)
{
	struct cs_etm_recording *ptr =
			container_of(itr, struct cs_etm_recording, itr);

	zfree(&ptr->wrapped);
	free(ptr);
}

struct auxtrace_record *cs_etm_record_init(int *err)
{
	struct perf_pmu *cs_etm_pmu;
	struct cs_etm_recording *ptr;

	cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);

	if (!cs_etm_pmu) {
		*err = -EINVAL;
		goto out;
	}

	ptr = zalloc(sizeof(struct cs_etm_recording));
	if (!ptr) {
		*err = -ENOMEM;
		goto out;
	}

	ptr->cs_etm_pmu			= cs_etm_pmu;
	ptr->itr.pmu			= cs_etm_pmu;
	ptr->itr.parse_snapshot_options	= cs_etm_parse_snapshot_options;
	ptr->itr.recording_options	= cs_etm_recording_options;
	ptr->itr.info_priv_size		= cs_etm_info_priv_size;
	ptr->itr.info_fill		= cs_etm_info_fill;
	ptr->itr.find_snapshot		= cs_etm_find_snapshot;
	ptr->itr.snapshot_start		= cs_etm_snapshot_start;
	ptr->itr.snapshot_finish	= cs_etm_snapshot_finish;
	ptr->itr.reference		= cs_etm_reference;
	ptr->itr.free			= cs_etm_recording_free;
	ptr->itr.read_finish		= auxtrace_record__read_finish;

	*err = 0;
	return &ptr->itr;
out:
	return NULL;
}
899