// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	struct evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// When adding a handler for an event not in this evlist, just ignore it.
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}
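
/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * The cpu_iter helpers above support the per-CPU iteration idiom used by
 * evlist__enable()/evlist__disable() below (evlist__for_each_cpu() begins
 * by resetting each evsel's cpu_iter via evlist__cpu_iter_start()):
 *
 *	evlist__for_each_cpu(evlist, i, cpu) {
 *		evlist__for_each_entry(evlist, pos) {
 *			if (evsel__cpu_iter_skip(pos, cpu))
 *				continue;
 *			// act on pos->cpu_iter - 1, this evsel's CPU index
 *		}
 *	}
 */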

void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i, imm = 0;
	bool has_imm = false;

	if (affinity__setup(&affinity) < 0)
		return;

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
			affinity__set(&affinity, cpu);

			evlist__for_each_entry(evlist, pos) {
				if (evsel__cpu_iter_skip(pos, cpu))
					continue;
				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
					continue;
				if (pos->immediate)
					has_imm = true;
				if (pos->immediate != imm)
					continue;
				evsel__disable_cpu(pos, pos->cpu_iter - 1);
			}
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
						u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
					    union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once-upon-a-time good value, i.e. things look
		 * strange since we can't read the sysctl value, but let's
		 * not die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
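
/*
 * Editor's note -- worked example, not part of the original file: with
 * 4 KiB pages, evlist__mmap_size(128) returns (128 + 1) * 4096 = 528384
 * bytes, i.e. 128 data pages plus one extra page for the ring-buffer
 * control header. A non-power-of-2 page count returns 0 (invalid).
 */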

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
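
/*
 * Editor's note -- illustrative values, not part of the original file:
 *
 *	parse_pages_arg("512K", 1, max)	-> 524288 bytes -> 128 pages (4 KiB pages)
 *	parse_pages_arg("100", 1, max)	-> 100 pages, rounded up to 128 with a
 *					   "rounding mmap pages size" notice
 *	parse_pages_arg("3X", 1, max)	-> -EINVAL (unknown size suffix)
 */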

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
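
/*
 * Editor's note -- illustrative consumer loop, not part of the original
 * file; assumes an already-opened evlist and elides error handling. The
 * libperf calls (perf_mmap__read_init() etc.) are the usual pairing for
 * non-overwrite maps:
 *
 *	if (evlist__mmap(evlist, UINT_MAX))	// UINT_MAX: mlock-limit default
 *		return -1;
 *	while (!done) {
 *		union perf_event *event;
 *		struct mmap *md = &evlist->mmap[0];	// one of core.nr_mmaps maps
 *
 *		evlist__poll(evlist, 100);
 *		if (perf_mmap__read_init(&md->core) < 0)
 *			continue;
 *		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
 *			// ... deliver the event ...
 *			perf_mmap__consume(&md->core);
 *		}
 *		perf_mmap__read_done(&md->core);
 *	}
 */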

int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are passed to perf record, '-a'
	 * overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is passed, target->per_thread = true and
	 * target->system_wide = false.
	 *
	 * So for perf record, target->per_thread && target->system_wide is
	 * always false, and thread_map__new_str doesn't call
	 * thread_map__new_all_cpus, which keeps perf record's current
	 * behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning: collect system-wide
	 * per-thread data. In that case thread_map__new_str calls
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}
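
/*
 * Editor's note -- illustrative sketch, not part of the original file: a
 * typical caller fills a struct target (see util/target.h) and lets
 * perf_evlist__create_maps() derive the cpu/thread maps:
 *
 *	struct target target = {
 *		.pid = "1234",		// or .cpu_list = "0-3", .tid = "...", ...
 *		.uid = UINT_MAX,	// UINT_MAX == no uid filtering
 *	};
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		pr_err("couldn't create thread/cpu maps\n");
 */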

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have
		 * a cpu limit, so the evlist and the evsel cpu maps should
		 * always match.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;	/* NULL so the npids == 0 case returns NULL, not garbage */
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
}
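
/*
 * Editor's note -- illustrative sketch, not part of the original file: a
 * tracer typically appends its own pid so it doesn't trace itself; the
 * generated filter is "common_pid != <pid>" (&&-chained for more pids):
 *
 *	if (perf_evlist__append_tp_filter_pid(evlist, getpid()) < 0)
 *		pr_err("couldn't append the pid filter\n");
 */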

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1165*4882a593Smuzhiyun 	if ((sample_type & PERF_SAMPLE_READ) &&
1166*4882a593Smuzhiyun 	    !(read_format & PERF_FORMAT_ID)) {
1167*4882a593Smuzhiyun 		return false;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	return true;
1171*4882a593Smuzhiyun }
1172*4882a593Smuzhiyun 
perf_evlist__id_hdr_size(struct evlist * evlist)1173*4882a593Smuzhiyun u16 perf_evlist__id_hdr_size(struct evlist *evlist)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	struct evsel *first = evlist__first(evlist);
1176*4882a593Smuzhiyun 	struct perf_sample *data;
1177*4882a593Smuzhiyun 	u64 sample_type;
1178*4882a593Smuzhiyun 	u16 size = 0;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	if (!first->core.attr.sample_id_all)
1181*4882a593Smuzhiyun 		goto out;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	sample_type = first->core.attr.sample_type;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	if (sample_type & PERF_SAMPLE_TID)
1186*4882a593Smuzhiyun 		size += sizeof(data->tid) * 2;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun        if (sample_type & PERF_SAMPLE_TIME)
1189*4882a593Smuzhiyun 		size += sizeof(data->time);
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	if (sample_type & PERF_SAMPLE_ID)
1192*4882a593Smuzhiyun 		size += sizeof(data->id);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	if (sample_type & PERF_SAMPLE_STREAM_ID)
1195*4882a593Smuzhiyun 		size += sizeof(data->stream_id);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	if (sample_type & PERF_SAMPLE_CPU)
1198*4882a593Smuzhiyun 		size += sizeof(data->cpu) * 2;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1201*4882a593Smuzhiyun 		size += sizeof(data->id);
1202*4882a593Smuzhiyun out:
1203*4882a593Smuzhiyun 	return size;
1204*4882a593Smuzhiyun }
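
/*
 * Worked example (illustrative): with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER,
 * the trailing sample_id block on non-sample events takes
 *
 *	sizeof(tid) * 2 + sizeof(time) + sizeof(id) = 8 + 8 + 8 = 24 bytes
 *
 * which is what this function would return for that configuration.
 */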

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all;
}

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* Both maps were set up, so report success to the caller. */
	err = 0;

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}
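
/*
 * Usage sketch (illustrative, not part of this file): the typical open
 * sequence pairs evlist__open() with evlist__strerror_open() (defined
 * further below) to turn errno into a user-facing hint. example_open()
 * is a hypothetical helper and error handling for evlist__new() and
 * parse_events() is elided.
 */
#if 0
static int example_open(void)
{
	char errbuf[BUFSIZ];
	struct evlist *evlist = evlist__new();

	parse_events(evlist, "cycles", NULL);

	if (evlist__open(evlist) < 0) {
		evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
		pr_err("%s\n", errbuf);
		return -1;
	}

	evlist__close(evlist);
	return 0;
}
#endif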

int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
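
/*
 * Lifecycle sketch (illustrative, not part of this file): fork the
 * workload stopped on the "cork" pipe, open the events, then uncork it
 * so nothing runs before the counters are in place. example_run() and
 * its argv are hypothetical.
 */
#if 0
static int example_run(struct evlist *evlist, struct target *target)
{
	const char *argv[] = { "sleep", "1", NULL };

	if (perf_evlist__prepare_workload(evlist, target, argv, false, NULL))
		return -1;

	if (evlist__open(evlist) < 0)
		return -1;

	/* counters are set up; let the child exec now */
	return perf_evlist__start_workload(evlist);
}
#endif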

int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}
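
/*
 * Sketch (illustrative): given an event already read from a ring
 * buffer, resolve it to its evsel and decode the sample fields.
 * example_handle() is hypothetical; the mmap-reading loop that would
 * produce 'event' is elided.
 */
#if 0
static void example_handle(struct evlist *evlist, union perf_event *event)
{
	struct perf_sample sample;

	if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
		pr_debug("sample: pid %u tid %u time %" PRIu64 "\n",
			 sample.pid, sample.tid, sample.time);
}
#endif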

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %d kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}
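
/*
 * Sketch (illustrative): exactly one evsel carries the side-band
 * tracking events (mmap, comm, task). A tool can pin that role to a
 * chosen event before opening; example_track_first() is hypothetical.
 */
#if 0
static void example_track_first(struct evlist *evlist)
{
	perf_evlist__set_tracking_event(evlist, evlist__first(evlist));
}
#endif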

struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}
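
/*
 * Transition sketch (illustrative): the backward ring buffer moves
 * NOTREADY -> RUNNING -> DATA_PENDING -> EMPTY -> RUNNING. A snapshot
 * pauses the overwrite mmaps, drains them, then resumes.
 * example_snapshot() is hypothetical and elides the actual draining.
 */
#if 0
static void example_snapshot(struct evlist *evlist)
{
	/* pause the overwrite mmaps so the data stops moving */
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	/* ... consume evlist->overwrite_mmap[] here ... */

	/* mark the maps empty, then let the kernel write again */
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
	perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
#endif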

bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in a data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel,
					    bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
			leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->leader = c2;
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}
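
/*
 * Fallback sketch (illustrative): when opening a weak group member
 * fails, the group is dissolved and every former member is flagged for
 * a standalone reopen. example_open_weak() is a hypothetical condensed
 * form of the retry loop callers like perf stat implement.
 */
#if 0
static int example_open_weak(struct evlist *evlist, struct evsel *evsel)
{
	if (evsel__open(evsel, evsel->core.cpus, evsel->core.threads) == 0)
		return 0;

	/* break up the group; members get reset_group set */
	perf_evlist__reset_weak_group(evlist, evsel, true);
	return -1; /* caller restarts its open loop over the evlist */
}
#endif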

static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}
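
/*
 * Format sketch (illustrative): the control spec is either
 * "fd:<ctl-fd>[,<ack-fd>]" for descriptors inherited from the parent,
 * or "fifo:<ctl-path>[,<ack-path>]" for FIFOs opened here. The values
 * below are hypothetical.
 */
#if 0
static void example_parse_control(void)
{
	int ctl_fd = -1, ctl_fd_ack = -1;
	bool ctl_fd_close = false;

	/* inherited descriptors: commands on fd 10, acks written to fd 11 */
	evlist__parse_control("fd:10,11", &ctl_fd, &ctl_fd_ack, &ctl_fd_close);

	/* named FIFOs, opened (and later closed) on our behalf */
	evlist__parse_control("fifo:/tmp/perf.ctl,/tmp/perf.ack",
			      &ctl_fd, &ctl_fd_ack, &ctl_fd_close);
}
#endif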

void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}

static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c = '\0';
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
				evlist__enable(evlist);
				break;
			case EVLIST_CTL_CMD_DISABLE:
				evlist__disable(evlist);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}
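
/*
 * Event-loop sketch (illustrative): wire the control fd into the
 * evlist pollfd array, then service "enable"/"disable" commands as
 * they arrive. example_ctl_loop() is hypothetical, and evlist__poll()
 * is assumed to be the usual wrapper around poll() on
 * evlist->core.pollfd.
 */
#if 0
static void example_ctl_loop(struct evlist *evlist, int ctl_fd, int ack_fd)
{
	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;

	if (evlist__initialize_ctlfd(evlist, ctl_fd, ack_fd))
		return;

	/* POLLHUP/POLLERR make evlist__ctlfd_process() finalize the fd */
	while (evlist__ctlfd_initialized(evlist)) {
		evlist__poll(evlist, -1);
		evlist__ctlfd_process(evlist, &cmd);
	}
}
#endif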

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->idx == idx)
			return evsel;
	}
	return NULL;
}