xref: /OK3568_Linux_fs/kernel/tools/perf/tests/event-times.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/string.h>
#include <errno.h>
#include <inttypes.h>
#include <string.h>
#include <sys/wait.h>
#include <perf/cpumap.h>
#include "tests.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "parse-events.h"
#include "thread_map.h"
#include "target.h"

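/*
 * Fork a 'true' workload, open the event with enable_on_exec set and
 * then kick the child off: counting starts when the child exec()s.
 */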
static int attach__enable_on_exec(struct evlist *evlist)
{
	struct evsel *evsel = evlist__last(evlist);
	struct target target = {
		.uid = UINT_MAX,
	};
	const char *argv[] = { "true", NULL, };
	char sbuf[STRERR_BUFSIZE];
	int err;

	pr_debug("attaching to spawned child, enable on exec\n");

	err = perf_evlist__create_maps(evlist, &target);
	if (err < 0) {
		pr_debug("Not enough memory to create thread/cpu maps\n");
		return err;
	}

	err = perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		return err;
	}

	evsel->core.attr.enable_on_exec = 1;

	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("perf_evlist__open: %s\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		return err;
	}

	return perf_evlist__start_workload(evlist) == 1 ? TEST_OK : TEST_FAIL;
}

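/* Reap the spawned child; counting stopped when it exited. */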
static int detach__enable_on_exec(struct evlist *evlist)
{
	waitpid(evlist->workload.pid, NULL, 0);
	return 0;
}

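/*
 * Open the event on the current thread with attr.disabled set, then
 * enable it explicitly.
 */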
static int attach__current_disabled(struct evlist *evlist)
{
	struct evsel *evsel = evlist__last(evlist);
	struct perf_thread_map *threads;
	int err;

	pr_debug("attaching to current thread as disabled\n");

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	evsel->core.attr.disabled = 1;

	err = evsel__open_per_thread(evsel, threads);
	if (err) {
		pr_debug("Failed to open event cpu-clock:u\n");
		return err;
	}

	perf_thread_map__put(threads);
	return evsel__enable(evsel) == 0 ? TEST_OK : TEST_FAIL;
}

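/* Open the event on the current thread, counting from the start. */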
static int attach__current_enabled(struct evlist *evlist)
{
	struct evsel *evsel = evlist__last(evlist);
	struct perf_thread_map *threads;
	int err;

	pr_debug("attaching to current thread as enabled\n");

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("failed to call thread_map__new\n");
		return -1;
	}

	err = evsel__open_per_thread(evsel, threads);

	perf_thread_map__put(threads);
	return err == 0 ? TEST_OK : TEST_FAIL;
}

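/* Detach callback shared by the current-thread and per-CPU variants. */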
static int detach__disable(struct evlist *evlist)
{
	struct evsel *evsel = evlist__last(evlist);

	return evsel__enable(evsel);
}

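/*
 * Open the event on CPU 0 with attr.disabled set, then enable it
 * explicitly.
 */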
static int attach__cpu_disabled(struct evlist *evlist)
{
	struct evsel *evsel = evlist__last(evlist);
	struct perf_cpu_map *cpus;
	int err;

	pr_debug("attaching to CPU 0 as disabled\n");

	cpus = perf_cpu_map__new("0");
	if (cpus == NULL) {
		pr_debug("failed to call perf_cpu_map__new\n");
		return -1;
	}

	evsel->core.attr.disabled = 1;

	err = evsel__open_per_cpu(evsel, cpus, -1);
	if (err) {
		if (err == -EACCES)
			return TEST_SKIP;

		pr_debug("Failed to open event cpu-clock:u\n");
		return err;
	}

	perf_cpu_map__put(cpus);
	return evsel__enable(evsel);
}

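/* Open the event on CPU 0, counting from the start. */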
static int attach__cpu_enabled(struct evlist *evlist)
{
	struct evsel *evsel = evlist__last(evlist);
	struct perf_cpu_map *cpus;
	int err;

	pr_debug("attaching to CPU 0 as enabled\n");

	cpus = perf_cpu_map__new("0");
	if (cpus == NULL) {
		pr_debug("failed to call perf_cpu_map__new\n");
		return -1;
	}

	err = evsel__open_per_cpu(evsel, cpus, -1);
	if (err == -EACCES)
		return TEST_SKIP;

	perf_cpu_map__put(cpus);
	return err ? TEST_FAIL : TEST_OK;
}

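/*
 * Common test body: parse cpu-clock:u, request the enabled/running
 * times in the read format, attach, spin for a while, detach and
 * check that the enabled time matches the running time.
 */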
static int test_times(int (attach)(struct evlist *),
		      int (detach)(struct evlist *))
{
	struct perf_counts_values count;
	struct evlist *evlist = NULL;
	struct evsel *evsel;
	int err = -1, i;

	evlist = evlist__new();
	if (!evlist) {
		pr_debug("failed to create event list\n");
		goto out_err;
	}

	err = parse_events(evlist, "cpu-clock:u", NULL);
	if (err) {
		pr_debug("failed to parse event cpu-clock:u\n");
		goto out_err;
	}

	evsel = evlist__last(evlist);
	evsel->core.attr.read_format |=
		PERF_FORMAT_TOTAL_TIME_ENABLED |
		PERF_FORMAT_TOTAL_TIME_RUNNING;

	err = attach(evlist);
	if (err == TEST_SKIP) {
		pr_debug("  SKIP  : not enough rights\n");
		return err;
	}

	TEST_ASSERT_VAL("failed to attach", !err);

	for (i = 0; i < 100000000; i++) { }

	TEST_ASSERT_VAL("failed to detach", !detach(evlist));

	perf_evsel__read(&evsel->core, 0, 0, &count);

	err = !(count.ena == count.run);

	pr_debug("  %s: ena %" PRIu64", run %" PRIu64"\n",
		 !err ? "OK    " : "FAILED",
		 count.ena, count.run);

out_err:
	evlist__delete(evlist);
	return !err ? TEST_OK : TEST_FAIL;
}

/*
 * This test creates the software event 'cpu-clock:u', attaches it in
 * several ways (explained below) and checks that the enabled and
 * running times match.
 */
int test__event_times(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err, ret = 0;

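/*
 * Run one attach/detach combination and remember the first failure;
 * a later TEST_FAIL still overrides an earlier TEST_SKIP.
 */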
#define _T(attach, detach)			\
	err = test_times(attach, detach);	\
	if (err && (ret == TEST_OK || ret == TEST_SKIP))	\
		ret = err;

	/* attach on newly spawned process after exec */
	_T(attach__enable_on_exec,   detach__enable_on_exec)
	/* attach on current process as enabled */
	_T(attach__current_enabled,  detach__disable)
	/* attach on current process as disabled */
	_T(attach__current_disabled, detach__disable)
	/* attach on cpu as disabled */
	_T(attach__cpu_disabled,     detach__disable)
	/* attach on cpu as enabled */
	_T(attach__cpu_enabled,      detach__disable)

#undef _T
	return ret;
}