xref: /OK3568_Linux_fs/kernel/tools/perf/util/sideband_evlist.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only

#include "util/debug.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/mmap.h"
#include "util/perf_api_probe.h"
#include <perf/mmap.h>
#include <linux/perf_event.h>
#include <limits.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

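/*
 * Allocate a new evsel for @attr, forcing sample_id_all on so the event can
 * later be routed back to its evsel, attach the side band callback and its
 * private data, and append the evsel to @evlist.
 */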
int perf_evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
			      evsel__sb_cb_t cb, void *data)
{
	struct evsel *evsel;

	if (!attr->sample_id_all) {
		pr_warning("enabling sample_id_all for all side band events\n");
		attr->sample_id_all = 1;
	}

	evsel = evsel__new_idx(attr, evlist->core.nr_entries);
	if (!evsel)
		return -1;

	evsel->side_band.cb = cb;
	evsel->side_band.data = data;
	evlist__add(evlist, evsel);
	return 0;
}

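/*
 * Body of the side band thread: poll the evlist mmaps, hand each event to the
 * callback of the evsel it belongs to, and once evlist->thread.done is set,
 * keep draining until a pass over the mmaps yields no more data.
 */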
static void *perf_evlist__poll_thread(void *arg)
{
	struct evlist *evlist = arg;
	bool draining = false;
	int i, done = 0;
	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing when, for instance, reading the build-ids at
	 * the end of a 'perf record' session.
	 */
	unshare(CLONE_FS);

	while (!done) {
		bool got_data = false;

		if (evlist->thread.done)
			draining = true;

		if (!draining)
			evlist__poll(evlist, 1000);

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			struct mmap *map = &evlist->mmap[i];
			union perf_event *event;

			if (perf_mmap__read_init(&map->core))
				continue;
			while ((event = perf_mmap__read_event(&map->core)) != NULL) {
				struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

				if (evsel && evsel->side_band.cb)
					evsel->side_band.cb(event, evsel->side_band.data);
				else
					pr_warning("cannot locate proper evsel for the side band event\n");

				perf_mmap__consume(&map->core);
				got_data = true;
			}
			perf_mmap__read_done(&map->core);
		}

		if (draining && !got_data)
			break;
	}
	return NULL;
}

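/*
 * Register @cb/@data as the side band callback on every evsel already in
 * @evlist, force sample_id_all, and set a minimal (1 byte) wakeup watermark
 * so the poll thread is woken as soon as any data lands in the ring buffer.
 */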
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel->core.attr.sample_id_all    = 1;
		evsel->core.attr.watermark        = 1;
		evsel->core.attr.wakeup_watermark = 1;
		evsel->side_band.cb   = cb;
		evsel->side_band.data = data;
	}
}

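/*
 * Create the cpu/thread maps for @target, open and mmap every side band
 * event, enable them and spawn the poll thread.  On any failure the evlist
 * is deleted and -1 is returned.
 */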
int perf_evlist__start_sb_thread(struct evlist *evlist, struct target *target)
{
	struct evsel *counter;

	if (!evlist)
		return 0;

	if (perf_evlist__create_maps(evlist, target))
		goto out_delete_evlist;

	if (evlist->core.nr_entries > 1) {
		bool can_sample_identifier = perf_can_sample_identifier();

		evlist__for_each_entry(evlist, counter)
			evsel__set_sample_id(counter, can_sample_identifier);

		perf_evlist__set_id_pos(evlist);
	}

	evlist__for_each_entry(evlist, counter) {
		if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
			goto out_delete_evlist;
	}

	if (evlist__mmap(evlist, UINT_MAX))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__enable(counter))
			goto out_delete_evlist;
	}

	evlist->thread.done = 0;
	if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
		goto out_delete_evlist;

	return 0;

out_delete_evlist:
	evlist__delete(evlist);
	evlist = NULL;
	return -1;
}

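/*
 * Signal the poll thread to drain and exit, wait for it, then free the side
 * band evlist.
 */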
void perf_evlist__stop_sb_thread(struct evlist *evlist)
{
	if (!evlist)
		return;
	evlist->thread.done = 1;
	pthread_join(evlist->thread.th, NULL);
	evlist__delete(evlist);
}

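/*
 * A minimal usage sketch (not part of this file): a caller such as
 * 'perf record' typically builds a dedicated side band evlist, registers one
 * callback per event of interest, then brackets the main session with the
 * start/stop helpers above.  The attr setup, my_sb_cb and my_data below are
 * illustrative assumptions, not code from this tree:
 *
 *	struct evlist *sb_evlist = evlist__new();
 *	struct perf_event_attr attr = {
 *		.type	       = PERF_TYPE_SOFTWARE,
 *		.config	       = PERF_COUNT_SW_DUMMY,
 *		.sample_id_all = 1,
 *	};
 *
 *	if (!sb_evlist || perf_evlist__add_sb_event(sb_evlist, &attr, my_sb_cb, my_data))
 *		return -1;
 *	if (perf_evlist__start_sb_thread(sb_evlist, &target))
 *		pr_debug("Couldn't start the side band thread\n");
 *
 *	... run the main recording session ...
 *
 *	perf_evlist__stop_sb_thread(sb_evlist);
 */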