xref: /OK3568_Linux_fs/kernel/tools/perf/tests/openat-syscall-all-cpus.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/string.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include <perf/cpumap.h>
#include <internal/cpumap.h>
#include "debug.h"
#include "stat.h"
#include "util/counts.h"

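/*
 * Open the syscalls:sys_enter_openat tracepoint on every CPU for this
 * process, pin the thread to each CPU in turn and perform a distinct,
 * known number of openat(2) calls there, then read the counter back
 * per CPU and verify each count matches what was generated.
 */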
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1, fd, cpu;
	struct perf_cpu_map *cpus;
	struct evsel *evsel;
	unsigned int nr_openat_calls = 111, i;
	cpu_set_t cpu_set;
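	/* Monitor just this process: pid -1, tid getpid(), no uid filter. */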
	struct perf_thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
	char sbuf[STRERR_BUFSIZE];
	char errbuf[BUFSIZ];

	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_thread_map_delete;
	}

	CPU_ZERO(&cpu_set);

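	/* Create an evsel for the syscalls:sys_enter_openat tracepoint. */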
	evsel = evsel__newtp("syscalls", "sys_enter_openat");
	if (IS_ERR(evsel)) {
		tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
		pr_debug("%s\n", errbuf);
		goto out_cpu_map_delete;
	}

	if (evsel__open(evsel, cpus, threads) < 0) {
		pr_debug("failed to open counter: %s, "
			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_evsel_delete;
	}

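	/*
	 * Make each CPU generate a different number of openat() calls
	 * (nr_openat_calls + cpu) so that the per-CPU counts read back
	 * below are individually distinguishable.
	 */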
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int ncalls = nr_openat_calls + cpu;
		/*
		 * XXX eventually lift this restriction in a way that
		 * keeps perf building on older glibc installations
		 * without CPU_ALLOC. 1024 cpus in 2010 still seems
		 * a reasonable upper limit tho :-)
		 */
		if (cpus->map[cpu] >= CPU_SETSIZE) {
			pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
			continue;
		}

		CPU_SET(cpus->map[cpu], &cpu_set);
		if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
			pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
				 cpus->map[cpu],
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_close_fd;
		}
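		/*
		 * The dirfd of 0 is ignored because the path is absolute,
		 * so this behaves exactly like open("/etc/passwd", O_RDONLY).
		 */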
		for (i = 0; i < ncalls; ++i) {
			fd = openat(0, "/etc/passwd", O_RDONLY);
			close(fd);
		}
		CPU_CLR(cpus->map[cpu], &cpu_set);
	}

	/*
	 * Here we need to explicitly preallocate the counts, as if
	 * we used the auto allocation it would allocate counts for a
	 * single cpu only, since we start reading from cpu 0.
	 */
	if (evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
		pr_debug("evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
		goto out_close_fd;
	}

	err = 0;

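	/*
	 * Read the counter back on each CPU map index and compare it
	 * with the number of calls generated on that CPU above.
	 */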
	for (cpu = 0; cpu < cpus->nr; ++cpu) {
		unsigned int expected;

		if (cpus->map[cpu] >= CPU_SETSIZE)
			continue;

		if (evsel__read_on_cpu(evsel, cpu, 0) < 0) {
			pr_debug("evsel__read_on_cpu\n");
			err = -1;
			break;
		}

		expected = nr_openat_calls + cpu;
		if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
			pr_debug("evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
				 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
			err = -1;
		}
	}

	evsel__free_counts(evsel);
out_close_fd:
	perf_evsel__close_fd(&evsel->core);
out_evsel_delete:
	evsel__delete(evsel);
out_cpu_map_delete:
	perf_cpu_map__put(cpus);
out_thread_map_delete:
	perf_thread_map__put(threads);
	return err;
}