xref: /OK3568_Linux_fs/kernel/tools/perf/tests/switch-tracking.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <sys/time.h>
3*4882a593Smuzhiyun #include <sys/prctl.h>
4*4882a593Smuzhiyun #include <errno.h>
5*4882a593Smuzhiyun #include <limits.h>
6*4882a593Smuzhiyun #include <time.h>
7*4882a593Smuzhiyun #include <stdlib.h>
8*4882a593Smuzhiyun #include <linux/zalloc.h>
9*4882a593Smuzhiyun #include <perf/cpumap.h>
10*4882a593Smuzhiyun #include <perf/evlist.h>
11*4882a593Smuzhiyun #include <perf/mmap.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include "debug.h"
14*4882a593Smuzhiyun #include "parse-events.h"
15*4882a593Smuzhiyun #include "evlist.h"
16*4882a593Smuzhiyun #include "evsel.h"
17*4882a593Smuzhiyun #include "thread_map.h"
18*4882a593Smuzhiyun #include "record.h"
19*4882a593Smuzhiyun #include "tests.h"
20*4882a593Smuzhiyun #include "util/mmap.h"
21*4882a593Smuzhiyun 
spin_sleep(void)22*4882a593Smuzhiyun static int spin_sleep(void)
23*4882a593Smuzhiyun {
24*4882a593Smuzhiyun 	struct timeval start, now, diff, maxtime;
25*4882a593Smuzhiyun 	struct timespec ts;
26*4882a593Smuzhiyun 	int err, i;
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 	maxtime.tv_sec = 0;
29*4882a593Smuzhiyun 	maxtime.tv_usec = 50000;
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	err = gettimeofday(&start, NULL);
32*4882a593Smuzhiyun 	if (err)
33*4882a593Smuzhiyun 		return err;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun 	/* Spin for 50ms */
36*4882a593Smuzhiyun 	while (1) {
37*4882a593Smuzhiyun 		for (i = 0; i < 1000; i++)
38*4882a593Smuzhiyun 			barrier();
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 		err = gettimeofday(&now, NULL);
41*4882a593Smuzhiyun 		if (err)
42*4882a593Smuzhiyun 			return err;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 		timersub(&now, &start, &diff);
45*4882a593Smuzhiyun 		if (timercmp(&diff, &maxtime, > /* For checkpatch */))
46*4882a593Smuzhiyun 			break;
47*4882a593Smuzhiyun 	}
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 	ts.tv_nsec = 50 * 1000 * 1000;
50*4882a593Smuzhiyun 	ts.tv_sec = 0;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	/* Sleep for 50ms */
53*4882a593Smuzhiyun 	err = nanosleep(&ts, NULL);
54*4882a593Smuzhiyun 	if (err == EINTR)
55*4882a593Smuzhiyun 		err = 0;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	return err;
58*4882a593Smuzhiyun }
59*4882a593Smuzhiyun 
/*
 * State accumulated while replaying the recorded events in time order.
 */
struct switch_tracking {
	struct evsel *switch_evsel;	/* the sched:sched_switch event */
	struct evsel *cycles_evsel;	/* the cycles:u event */
	pid_t *tids;			/* per-cpu: last tid switched in (-1 = unknown) */
	int nr_tids;			/* number of entries in tids[] */
	int comm_seen[4];		/* whether "Test COMM 1".."Test COMM 4" were seen */
	int cycles_before_comm_1;	/* cycles sample seen before first comm marker */
	int cycles_between_comm_2_and_comm_3;	/* cycles sample while cycles event was disabled */
	int cycles_after_comm_4;	/* cycles sample seen after re-enabling */
};
70*4882a593Smuzhiyun 
check_comm(struct switch_tracking * switch_tracking,union perf_event * event,const char * comm,int nr)71*4882a593Smuzhiyun static int check_comm(struct switch_tracking *switch_tracking,
72*4882a593Smuzhiyun 		      union perf_event *event, const char *comm, int nr)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun 	if (event->header.type == PERF_RECORD_COMM &&
75*4882a593Smuzhiyun 	    (pid_t)event->comm.pid == getpid() &&
76*4882a593Smuzhiyun 	    (pid_t)event->comm.tid == getpid() &&
77*4882a593Smuzhiyun 	    strcmp(event->comm.comm, comm) == 0) {
78*4882a593Smuzhiyun 		if (switch_tracking->comm_seen[nr]) {
79*4882a593Smuzhiyun 			pr_debug("Duplicate comm event\n");
80*4882a593Smuzhiyun 			return -1;
81*4882a593Smuzhiyun 		}
82*4882a593Smuzhiyun 		switch_tracking->comm_seen[nr] = 1;
83*4882a593Smuzhiyun 		pr_debug3("comm event: %s nr: %d\n", event->comm.comm, nr);
84*4882a593Smuzhiyun 		return 1;
85*4882a593Smuzhiyun 	}
86*4882a593Smuzhiyun 	return 0;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun 
check_cpu(struct switch_tracking * switch_tracking,int cpu)89*4882a593Smuzhiyun static int check_cpu(struct switch_tracking *switch_tracking, int cpu)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun 	int i, nr = cpu + 1;
92*4882a593Smuzhiyun 
93*4882a593Smuzhiyun 	if (cpu < 0)
94*4882a593Smuzhiyun 		return -1;
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun 	if (!switch_tracking->tids) {
97*4882a593Smuzhiyun 		switch_tracking->tids = calloc(nr, sizeof(pid_t));
98*4882a593Smuzhiyun 		if (!switch_tracking->tids)
99*4882a593Smuzhiyun 			return -1;
100*4882a593Smuzhiyun 		for (i = 0; i < nr; i++)
101*4882a593Smuzhiyun 			switch_tracking->tids[i] = -1;
102*4882a593Smuzhiyun 		switch_tracking->nr_tids = nr;
103*4882a593Smuzhiyun 		return 0;
104*4882a593Smuzhiyun 	}
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	if (cpu >= switch_tracking->nr_tids) {
107*4882a593Smuzhiyun 		void *addr;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 		addr = realloc(switch_tracking->tids, nr * sizeof(pid_t));
110*4882a593Smuzhiyun 		if (!addr)
111*4882a593Smuzhiyun 			return -1;
112*4882a593Smuzhiyun 		switch_tracking->tids = addr;
113*4882a593Smuzhiyun 		for (i = switch_tracking->nr_tids; i < nr; i++)
114*4882a593Smuzhiyun 			switch_tracking->tids[i] = -1;
115*4882a593Smuzhiyun 		switch_tracking->nr_tids = nr;
116*4882a593Smuzhiyun 		return 0;
117*4882a593Smuzhiyun 	}
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	return 0;
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
/*
 * Handle one PERF_RECORD_SAMPLE.
 *
 * For sched_switch samples: verify that no switch events were lost on the
 * sample's cpu (i.e. that the evsel->core.system_wide flag worked) by
 * checking that each switch's prev_tid matches the previous switch's
 * next_tid on that cpu.
 *
 * For cycles samples: record in which phase of the test the sample landed,
 * relative to the four comm marker events.
 *
 * Return: 0 on success, negative on error.
 */
static int process_sample_event(struct evlist *evlist,
				union perf_event *event,
				struct switch_tracking *switch_tracking)
{
	struct perf_sample sample;
	struct evsel *evsel;
	pid_t next_tid, prev_tid;
	int cpu, err;

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	/* Map the sample id back to the evsel that produced it */
	evsel = perf_evlist__id2evsel(evlist, sample.id);
	if (evsel == switch_tracking->switch_evsel) {
		next_tid = evsel__intval(evsel, &sample, "next_pid");
		prev_tid = evsel__intval(evsel, &sample, "prev_pid");
		cpu = sample.cpu;
		pr_debug3("sched_switch: cpu: %d prev_tid %d next_tid %d\n",
			  cpu, prev_tid, next_tid);
		/* Make sure tids[] can be indexed by this cpu */
		err = check_cpu(switch_tracking, cpu);
		if (err)
			return err;
		/*
		 * Check for no missing sched_switch events i.e. that the
		 * evsel->core.system_wide flag has worked.
		 */
		if (switch_tracking->tids[cpu] != -1 &&
		    switch_tracking->tids[cpu] != prev_tid) {
			pr_debug("Missing sched_switch events\n");
			return -1;
		}
		switch_tracking->tids[cpu] = next_tid;
	}

	if (evsel == switch_tracking->cycles_evsel) {
		pr_debug3("cycles event\n");
		/* Classify the sample by test phase using the comm markers */
		if (!switch_tracking->comm_seen[0])
			switch_tracking->cycles_before_comm_1 = 1;
		if (switch_tracking->comm_seen[1] &&
		    !switch_tracking->comm_seen[2])
			switch_tracking->cycles_between_comm_2_and_comm_3 = 1;
		if (switch_tracking->comm_seen[3])
			switch_tracking->cycles_after_comm_4 = 1;
	}

	return 0;
}
171*4882a593Smuzhiyun 
process_event(struct evlist * evlist,union perf_event * event,struct switch_tracking * switch_tracking)172*4882a593Smuzhiyun static int process_event(struct evlist *evlist, union perf_event *event,
173*4882a593Smuzhiyun 			 struct switch_tracking *switch_tracking)
174*4882a593Smuzhiyun {
175*4882a593Smuzhiyun 	if (event->header.type == PERF_RECORD_SAMPLE)
176*4882a593Smuzhiyun 		return process_sample_event(evlist, event, switch_tracking);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	if (event->header.type == PERF_RECORD_COMM) {
179*4882a593Smuzhiyun 		int err, done = 0;
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 		err = check_comm(switch_tracking, event, "Test COMM 1", 0);
182*4882a593Smuzhiyun 		if (err < 0)
183*4882a593Smuzhiyun 			return -1;
184*4882a593Smuzhiyun 		done += err;
185*4882a593Smuzhiyun 		err = check_comm(switch_tracking, event, "Test COMM 2", 1);
186*4882a593Smuzhiyun 		if (err < 0)
187*4882a593Smuzhiyun 			return -1;
188*4882a593Smuzhiyun 		done += err;
189*4882a593Smuzhiyun 		err = check_comm(switch_tracking, event, "Test COMM 3", 2);
190*4882a593Smuzhiyun 		if (err < 0)
191*4882a593Smuzhiyun 			return -1;
192*4882a593Smuzhiyun 		done += err;
193*4882a593Smuzhiyun 		err = check_comm(switch_tracking, event, "Test COMM 4", 3);
194*4882a593Smuzhiyun 		if (err < 0)
195*4882a593Smuzhiyun 			return -1;
196*4882a593Smuzhiyun 		done += err;
197*4882a593Smuzhiyun 		if (done != 1) {
198*4882a593Smuzhiyun 			pr_debug("Unexpected comm event\n");
199*4882a593Smuzhiyun 			return -1;
200*4882a593Smuzhiyun 		}
201*4882a593Smuzhiyun 	}
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 	return 0;
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun 
/*
 * One recorded event queued for time-ordered replay.
 */
struct event_node {
	struct list_head list;		/* link in the collection list */
	union perf_event *event;	/* event data (points into the mmap) */
	u64 event_time;			/* sample timestamp used for sorting */
};
211*4882a593Smuzhiyun 
/*
 * Parse @event's timestamp and queue it on @events for later sorting.
 *
 * The node is added to the list *before* the sample is parsed so that, even
 * when this function fails, the caller's free_event_nodes() will still free
 * the node.
 *
 * Return: 0 on success, -1 on allocation/parse failure or missing timestamp.
 */
static int add_event(struct evlist *evlist, struct list_head *events,
		     union perf_event *event)
{
	struct perf_sample sample;
	struct event_node *node;

	node = malloc(sizeof(struct event_node));
	if (!node) {
		pr_debug("malloc failed\n");
		return -1;
	}
	node->event = event;
	/* Link first so the caller frees the node even on the error paths */
	list_add(&node->list, events);

	if (perf_evlist__parse_sample(evlist, event, &sample)) {
		pr_debug("perf_evlist__parse_sample failed\n");
		return -1;
	}

	/* Sorting requires a timestamp; TIME sample bit was set on all evsels */
	if (!sample.time) {
		pr_debug("event with no time\n");
		return -1;
	}

	node->event_time = sample.time;

	return 0;
}
240*4882a593Smuzhiyun 
free_event_nodes(struct list_head * events)241*4882a593Smuzhiyun static void free_event_nodes(struct list_head *events)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun 	struct event_node *node;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	while (!list_empty(events)) {
246*4882a593Smuzhiyun 		node = list_entry(events->next, struct event_node, list);
247*4882a593Smuzhiyun 		list_del_init(&node->list);
248*4882a593Smuzhiyun 		free(node);
249*4882a593Smuzhiyun 	}
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun 
compar(const void * a,const void * b)252*4882a593Smuzhiyun static int compar(const void *a, const void *b)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun 	const struct event_node *nodea = a;
255*4882a593Smuzhiyun 	const struct event_node *nodeb = b;
256*4882a593Smuzhiyun 	s64 cmp = nodea->event_time - nodeb->event_time;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	return cmp;
259*4882a593Smuzhiyun }
260*4882a593Smuzhiyun 
/*
 * Drain every event from the evlist's mmap buffers, sort them by sample
 * time, and feed them one by one to process_event().
 *
 * Events are first collected on a list (the count is not known up front),
 * then copied into an array and qsort'ed, because the per-cpu ring buffers
 * are not globally time-ordered.
 *
 * Return: 0 on success, negative on error.
 */
static int process_events(struct evlist *evlist,
			  struct switch_tracking *switch_tracking)
{
	union perf_event *event;
	unsigned pos, cnt = 0;
	LIST_HEAD(events);
	struct event_node *events_array, *node;
	struct mmap *md;
	int i, ret;

	/* Collect all events from every mmap'ed ring buffer */
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			cnt += 1;
			ret = add_event(evlist, &events, event);
			perf_mmap__consume(&md->core);
			if (ret < 0)
				goto out_free_nodes;
		}
		perf_mmap__read_done(&md->core);
	}

	/*
	 * NOTE(review): if cnt == 0, calloc(0, ...) may legally return NULL
	 * and be reported as a failure here - confirm an empty trace cannot
	 * occur (the workload always generates events in practice).
	 */
	events_array = calloc(cnt, sizeof(struct event_node));
	if (!events_array) {
		pr_debug("calloc failed\n");
		ret = -1;
		goto out_free_nodes;
	}

	/* Copy to a flat array so the events can be sorted by time */
	pos = 0;
	list_for_each_entry(node, &events, list)
		events_array[pos++] = *node;

	qsort(events_array, cnt, sizeof(struct event_node), compar);

	/* Replay in time order */
	for (pos = 0; pos < cnt; pos++) {
		ret = process_event(evlist, events_array[pos].event,
				    switch_tracking);
		if (ret < 0)
			goto out_free;
	}

	ret = 0;
out_free:
	pr_debug("%u events recorded\n", cnt);
	free(events_array);
out_free_nodes:
	free_event_nodes(&events);
	return ret;
}
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun /**
316*4882a593Smuzhiyun  * test__switch_tracking - test using sched_switch and tracking events.
317*4882a593Smuzhiyun  *
318*4882a593Smuzhiyun  * This function implements a test that checks that sched_switch events and
319*4882a593Smuzhiyun  * tracking events can be recorded for a workload (current process) using the
320*4882a593Smuzhiyun  * evsel->core.system_wide and evsel->tracking flags (respectively) with other events
321*4882a593Smuzhiyun  * sometimes enabled or disabled.
322*4882a593Smuzhiyun  */
int test__switch_tracking(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	const char *sched_switch = "sched:sched_switch";
	struct switch_tracking switch_tracking = { .tids = NULL, };
	struct record_opts opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
		},
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel, *cpu_clocks_evsel, *cycles_evsel;
	struct evsel *switch_evsel, *tracking_evsel;
	const char *comm;
	int err = -1;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (!threads) {
		pr_debug("thread_map__new failed!\n");
		goto out_err;
	}

	cpus = perf_cpu_map__new(NULL);
	if (!cpus) {
		pr_debug("perf_cpu_map__new failed!\n");
		goto out_err;
	}

	evlist = evlist__new();
	if (!evlist) {
		pr_debug("evlist__new failed!\n");
		goto out_err;
	}

	/* After this the evlist owns the maps (see cleanup at "out:") */
	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* First event */
	err = parse_events(evlist, "cpu-clock:u", NULL);
	if (err) {
		/* Message fixed: this parses cpu-clock:u, not dummy:u */
		pr_debug("Failed to parse event cpu-clock:u\n");
		goto out_err;
	}

	cpu_clocks_evsel = evlist__last(evlist);

	/* Second event */
	err = parse_events(evlist, "cycles:u", NULL);
	if (err) {
		pr_debug("Failed to parse event cycles:u\n");
		goto out_err;
	}

	cycles_evsel = evlist__last(evlist);

	/* Third event */
	if (!perf_evlist__can_select_event(evlist, sched_switch)) {
		pr_debug("No sched_switch\n");
		err = 0;
		goto out;
	}

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug("Failed to parse event %s\n", sched_switch);
		goto out_err;
	}

	switch_evsel = evlist__last(evlist);

	evsel__set_sample_bit(switch_evsel, CPU);
	evsel__set_sample_bit(switch_evsel, TIME);

	/* Record switches on all cpus so none are missed for this process */
	switch_evsel->core.system_wide = true;
	switch_evsel->no_aux_samples = true;
	switch_evsel->immediate = true;

	/* Test moving an event to the front */
	if (cycles_evsel == evlist__first(evlist)) {
		pr_debug("cycles event already at front\n");
		goto out_err;
	}
	perf_evlist__to_front(evlist, cycles_evsel);
	if (cycles_evsel != evlist__first(evlist)) {
		pr_debug("Failed to move cycles event to front\n");
		goto out_err;
	}

	evsel__set_sample_bit(cycles_evsel, CPU);
	evsel__set_sample_bit(cycles_evsel, TIME);

	/* Fourth event */
	err = parse_events(evlist, "dummy:u", NULL);
	if (err) {
		pr_debug("Failed to parse event dummy:u\n");
		goto out_err;
	}

	tracking_evsel = evlist__last(evlist);

	/* The dummy event carries the mmap/comm tracking records */
	perf_evlist__set_tracking_event(evlist, tracking_evsel);

	tracking_evsel->core.attr.freq = 0;
	tracking_evsel->core.attr.sample_period = 1;

	evsel__set_sample_bit(tracking_evsel, TIME);

	/* Config events */
	perf_evlist__config(evlist, &opts, NULL);

	/* Check moved event is still at the front */
	if (cycles_evsel != evlist__first(evlist)) {
		pr_debug("Front event no longer at front\n");
		goto out_err;
	}

	/* Check tracking event is tracking */
	if (!tracking_evsel->core.attr.mmap || !tracking_evsel->core.attr.comm) {
		pr_debug("Tracking event not tracking\n");
		goto out_err;
	}

	/* Check non-tracking events are not tracking */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel) {
			if (evsel->core.attr.mmap || evsel->core.attr.comm) {
				pr_debug("Non-tracking event is tracking\n");
				goto out_err;
			}
		}
	}

	if (evlist__open(evlist) < 0) {
		pr_debug("Not supported\n");
		err = 0;
		goto out;
	}

	err = evlist__mmap(evlist, UINT_MAX);
	if (err) {
		pr_debug("evlist__mmap failed!\n");
		goto out_err;
	}

	evlist__enable(evlist);

	/*
	 * Workload: the comm markers (prctl PR_SET_NAME) delimit the phases
	 * in which the cycles event is expected to be enabled or disabled.
	 */
	err = evsel__disable(cpu_clocks_evsel);
	if (err) {
		pr_debug("evsel__disable failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	comm = "Test COMM 1";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = evsel__disable(cycles_evsel);
	if (err) {
		pr_debug("evsel__disable failed!\n");
		goto out_err;
	}

	comm = "Test COMM 2";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	comm = "Test COMM 3";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = evsel__enable(cycles_evsel);
	if (err) {
		/* Message fixed: this path enables, it does not disable */
		pr_debug("evsel__enable failed!\n");
		goto out_err;
	}

	comm = "Test COMM 4";
	err = prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0);
	if (err) {
		pr_debug("PR_SET_NAME failed!\n");
		goto out_err;
	}

	err = spin_sleep();
	if (err) {
		pr_debug("spin_sleep failed!\n");
		goto out_err;
	}

	evlist__disable(evlist);

	switch_tracking.switch_evsel = switch_evsel;
	switch_tracking.cycles_evsel = cycles_evsel;

	err = process_events(evlist, &switch_tracking);

	zfree(&switch_tracking.tids);

	if (err)
		goto out_err;

	/* Check all 4 comm events were seen i.e. that evsel->tracking works */
	if (!switch_tracking.comm_seen[0] || !switch_tracking.comm_seen[1] ||
	    !switch_tracking.comm_seen[2] || !switch_tracking.comm_seen[3]) {
		pr_debug("Missing comm events\n");
		goto out_err;
	}

	/* Check cycles event got enabled */
	if (!switch_tracking.cycles_before_comm_1) {
		pr_debug("Missing cycles events\n");
		goto out_err;
	}

	/* Check cycles event got disabled */
	if (switch_tracking.cycles_between_comm_2_and_comm_3) {
		pr_debug("cycles events even though event was disabled\n");
		goto out_err;
	}

	/* Check cycles event got enabled again */
	if (!switch_tracking.cycles_after_comm_4) {
		pr_debug("Missing cycles events\n");
		goto out_err;
	}
out:
	if (evlist) {
		/* The evlist owns cpus/threads once the maps were set */
		evlist__disable(evlist);
		evlist__delete(evlist);
	} else {
		perf_cpu_map__put(cpus);
		perf_thread_map__put(threads);
	}

	return err;

out_err:
	err = -1;
	goto out;
}
588