xref: /OK3568_Linux_fs/kernel/tools/perf/util/record.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include "debug.h"
3*4882a593Smuzhiyun #include "evlist.h"
4*4882a593Smuzhiyun #include "evsel.h"
5*4882a593Smuzhiyun #include "evsel_config.h"
6*4882a593Smuzhiyun #include "parse-events.h"
7*4882a593Smuzhiyun #include <errno.h>
8*4882a593Smuzhiyun #include <limits.h>
9*4882a593Smuzhiyun #include <stdlib.h>
10*4882a593Smuzhiyun #include <api/fs/fs.h>
11*4882a593Smuzhiyun #include <subcmd/parse-options.h>
12*4882a593Smuzhiyun #include <perf/cpumap.h>
13*4882a593Smuzhiyun #include "cloexec.h"
14*4882a593Smuzhiyun #include "util/perf_api_probe.h"
15*4882a593Smuzhiyun #include "record.h"
16*4882a593Smuzhiyun #include "../perf-sys.h"
17*4882a593Smuzhiyun #include "topdown.h"
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun /*
20*4882a593Smuzhiyun  * evsel__config_leader_sampling() uses special rules for leader sampling.
21*4882a593Smuzhiyun  * However, if the leader is an AUX area event, then assume the event to sample
22*4882a593Smuzhiyun  * is the next event.
23*4882a593Smuzhiyun  */
evsel__read_sampler(struct evsel * evsel,struct evlist * evlist)24*4882a593Smuzhiyun static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
25*4882a593Smuzhiyun {
26*4882a593Smuzhiyun 	struct evsel *leader = evsel->leader;
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 	if (evsel__is_aux_event(leader) || arch_topdown_sample_read(leader)) {
29*4882a593Smuzhiyun 		evlist__for_each_entry(evlist, evsel) {
30*4882a593Smuzhiyun 			if (evsel->leader == leader && evsel != evsel->leader)
31*4882a593Smuzhiyun 				return evsel;
32*4882a593Smuzhiyun 		}
33*4882a593Smuzhiyun 	}
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	return leader;
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun 
evsel__config_term_mask(struct evsel * evsel)38*4882a593Smuzhiyun static u64 evsel__config_term_mask(struct evsel *evsel)
39*4882a593Smuzhiyun {
40*4882a593Smuzhiyun 	struct evsel_config_term *term;
41*4882a593Smuzhiyun 	struct list_head *config_terms = &evsel->config_terms;
42*4882a593Smuzhiyun 	u64 term_types = 0;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	list_for_each_entry(term, config_terms, list) {
45*4882a593Smuzhiyun 		term_types |= 1 << term->type;
46*4882a593Smuzhiyun 	}
47*4882a593Smuzhiyun 	return term_types;
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun 
/*
 * Adjust @evsel's attributes for leader sampling: when the group leader
 * has sample_read set, only the sampling event actually samples; the
 * other group members are read as part of the leader's sample instead.
 */
static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;
	struct evsel *read_sampler;
	u64 term_types, freq_mask;

	/* Leader sampling only applies if the leader asked for sample_read. */
	if (!leader->sample_read)
		return;

	read_sampler = evsel__read_sampler(evsel, evlist);

	/* The event that leads the sampling keeps its own configuration. */
	if (evsel == read_sampler)
		return;

	term_types = evsel__config_term_mask(evsel);
	/*
	 * Disable sampling for all group members except those with explicit
	 * config terms or the leader. In the case of an AUX area event, the 2nd
	 * event in the group is the one that 'leads' the sampling.
	 */
	freq_mask = (1 << EVSEL__CONFIG_TERM_FREQ) | (1 << EVSEL__CONFIG_TERM_PERIOD);
	if ((term_types & freq_mask) == 0) {
		/* No explicit freq/period term: this member must not sample. */
		attr->freq           = 0;
		attr->sample_freq    = 0;
		attr->sample_period  = 0;
	}
	if ((term_types & (1 << EVSEL__CONFIG_TERM_OVERWRITE)) == 0)
		attr->write_backward = 0;

	/*
	 * We don't get a sample for slave events, we make them when delivering
	 * the group leader sample. Set the slave event to follow the master
	 * sample_type to ease up reporting.
	 * An AUX area event also has sample_type requirements, so also include
	 * the sample type bits from the leader's sample_type to cover that
	 * case.
	 */
	attr->sample_type = read_sampler->core.attr.sample_type |
			    leader->core.attr.sample_type;
}
91*4882a593Smuzhiyun 
/*
 * Configure every event in @evlist for recording, in several ordered
 * passes: set group leaders, apply per-event config, apply leader
 * sampling (which needs the final sample_type of each event), and then
 * decide whether sample IDs / the sample identifier are needed.
 */
void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;
	bool sample_id = opts->sample_id;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	/* A first map entry of -1 means "any CPU" (per-thread recording). */
	if (evlist->core.cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		evsel__config(evsel, opts, callchain);
		/* COMM exec only on the tracking event, if the kernel supports it. */
		if (evsel->tracking && use_comm_exec)
			evsel->core.attr.comm_exec = 1;
	}

	/* Configure leader sampling here now that the sample type is known */
	evlist__for_each_entry(evlist, evsel)
		evsel__config_leader_sampling(evsel, evlist);

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		sample_id = true;
	} else if (evlist->core.nr_entries > 1) {
		struct evsel *first = evlist__first(evlist);

		/*
		 * With mixed sample_types the identifier is needed to tell
		 * events apart; if they all match, plain sample IDs suffice.
		 */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->core.attr.sample_type == first->core.attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		sample_id = true;
	}

	if (sample_id) {
		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
149*4882a593Smuzhiyun 
/* Read the kernel's maximum sample rate sysctl into *rate; 0 on success. */
static int get_max_rate(unsigned int *rate)
{
	const char *sysctl_name = "kernel/perf_event_max_sample_rate";

	return sysctl__read_int(sysctl_name, (int *)rate);
}
154*4882a593Smuzhiyun 
/*
 * Resolve the sampling frequency/period settings in @opts.
 * Precedence: a user-supplied period (count) wins over any frequency;
 * a user-supplied frequency wins over the default; the result is then
 * clamped against the kernel's perf_event_max_sample_rate.
 * Returns 0 on success, -1 on a fatal configuration error.
 */
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	/* UINT_MAX / ULLONG_MAX are the "not set by the user" sentinels. */
	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	/* If the sysctl can't be read, skip the clamping and accept as-is. */
	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		if (opts->strict_freq) {
			pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
			       "       Please use -F freq option with a lower value or consider\n"
			       "       tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
			       max_rate);
			return -1;
		} else {
			pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
				   "         The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
				   "         The kernel will lower it when perf's interrupts take too long.\n"
				   "         Use --strict-freq to disable this throttling, refusing to record.\n",
				   max_rate, opts->freq, max_rate);

			opts->freq = max_rate;
		}
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}
214*4882a593Smuzhiyun 
/*
 * Finalize the record options before use. Currently only the
 * frequency/period settings need post-processing.
 * Returns 0 on success, -1 on error.
 */
int record_opts__config(struct record_opts *opts)
{
	int err = record_opts__config_freq(opts);

	return err;
}
219*4882a593Smuzhiyun 
/*
 * Probe whether the event described by @str can actually be opened on
 * this system, by parsing it into a temporary evlist and attempting a
 * real perf_event_open(). @evlist (may be NULL) only supplies the CPU
 * to probe on. Returns true if the event could be opened.
 */
bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
{
	struct evlist *temp_evlist;
	struct evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	/* @str may expand to several events; probe the last one parsed. */
	evsel = evlist__last(temp_evlist);

	if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
		/* No CPU constraint: fall back to the first online CPU. */
		struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);

		cpu =  cpus ? cpus->map[0] : 0;
		perf_cpu_map__put(cpus);
	} else {
		cpu = evlist->core.cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			/*
			 * A system-wide open (pid == -1) may be denied by
			 * perf_event_paranoid; retry once scoped to the
			 * current process (pid == 0) before giving up.
			 */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	evlist__delete(temp_evlist);
	return ret;
}
266*4882a593Smuzhiyun 
/*
 * -F/--freq option callback: accept either "max" (use the kernel's
 * perf_event_max_sample_rate) or a non-negative decimal frequency in Hz.
 * Stores the result in opts->user_freq. Returns 0 on success, negative
 * on error.
 */
int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
{
	unsigned int freq;
	struct record_opts *opts = opt->value;

	if (!str)
		return -EINVAL;

	if (strcasecmp(str, "max") == 0) {
		if (get_max_rate(&freq)) {
			pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
			return -1;
		}
		/* %'u: freq is unsigned, %'d would misprint values > INT_MAX. */
		pr_info("info: Using a maximum frequency rate of %'u Hz\n", freq);
	} else {
		char *end;
		unsigned long val;

		/*
		 * Parse explicitly instead of atoi(): reject trailing junk,
		 * negative values and out-of-range input instead of silently
		 * recording with frequency 0.
		 */
		errno = 0;
		val = strtoul(str, &end, 10);
		if (errno || end == str || *end != '\0' || *str == '-' ||
		    val > UINT_MAX) {
			pr_err("invalid frequency: '%s'\n", str);
			return -EINVAL;
		}
		freq = val;
	}

	opts->user_freq = freq;
	return 0;
}
288