// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CLR_() macros */
#include <pthread.h>
#include <stdlib.h>
#include <perf/cpumap.h>

#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

/*
 * This test generates a random number of calls to a few getpid-like
 * syscalls (getsid, getppid, getpgid), then sets up an mmap for a group
 * of events created to monitor those syscalls.
 *
 * It receives the events via the mmap and uses the PERF_SAMPLE_ID
 * generated sample.id field to map each sample back to its respective
 * perf_evsel instance.
 *
 * It then checks that the number of syscalls reported as perf events by
 * the kernel matches the number of syscalls actually made.
 */
int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct perf_thread_map *threads;
	struct perf_cpu_map *cpus;
	struct evlist *evlist;
	cpu_set_t cpu_set;
	const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
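	/*
	 * getsid() and getpgid() take a pid_t argument; cast them so all
	 * three entries fit the zero-argument pointer type. The test only
	 * counts sys_enter_* tracepoint hits, so the missing argument and
	 * the return values do not matter here.
	 */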
	pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void *)getpgid };
#define nsyscalls ARRAY_SIZE(syscall_names)
	unsigned int nr_events[nsyscalls],
		     expected_nr_events[nsyscalls], i, j;
	struct evsel *evsels[nsyscalls], *evsel;
	char sbuf[STRERR_BUFSIZE];
	struct mmap *md;

	threads = thread_map__new(-1, getpid(), UINT_MAX);
	if (threads == NULL) {
		pr_debug("thread_map__new\n");
		return -1;
	}

	cpus = perf_cpu_map__new(NULL);
	if (cpus == NULL) {
		pr_debug("perf_cpu_map__new\n");
		goto out_free_threads;
	}

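	/*
	 * Pin the test to the first CPU in the map so that all the generated
	 * events land in the single per-CPU mmap read back below.
	 */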
	CPU_ZERO(&cpu_set);
	CPU_SET(cpus->map[0], &cpu_set);
	if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
		pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
			 cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_free_cpus;
	}

	evlist = evlist__new();
	if (evlist == NULL) {
		pr_debug("evlist__new\n");
		goto out_free_cpus;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	for (i = 0; i < nsyscalls; ++i) {
		char name[64];

		snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]);
		evsels[i] = evsel__newtp("syscalls", name);
		if (IS_ERR(evsels[i])) {
			pr_debug("evsel__newtp(%s)\n", name);
			goto out_delete_evlist;
		}

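		/*
		 * Wake the reader up on every event and request
		 * PERF_SAMPLE_ID so each sample can be mapped back to its
		 * evsel when it is read out of the ring buffer.
		 */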
		evsels[i]->core.attr.wakeup_events = 1;
		evsel__set_sample_id(evsels[i], false);

		evlist__add(evlist, evsels[i]);

		if (evsel__open(evsels[i], cpus, threads) < 0) {
			pr_debug("failed to open counter: %s, "
				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
				 str_error_r(errno, sbuf, sizeof(sbuf)));
			goto out_delete_evlist;
		}

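		/* Expect a random number of calls in [1, 127] per syscall. */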
		nr_events[i] = 0;
		expected_nr_events[i] = 1 + rand() % 127;
	}

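	/* 128 data pages per ring buffer is ample for at most 3 * 127 samples. */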
	if (evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	for (i = 0; i < nsyscalls; ++i)
		for (j = 0; j < expected_nr_events[i]; ++j) {
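			/* Touch the return value to avoid set-but-unused warnings. */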
			int foo = syscalls[i]();
			++foo;
		}

	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

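	/*
	 * Drain the ring buffer: map each sample's id back to the evsel that
	 * generated it and bump the matching per-syscall counter.
	 */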
	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE) {
			pr_debug("unexpected %s event\n",
				 perf_event__name(event->header.type));
			goto out_delete_evlist;
		}

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err) {
			pr_err("Can't parse sample, err = %d\n", err);
			goto out_delete_evlist;
		}

		err = -1;
		evsel = perf_evlist__id2evsel(evlist, sample.id);
		if (evsel == NULL) {
			pr_debug("event with id %" PRIu64
				 " doesn't map to an evsel\n", sample.id);
			goto out_delete_evlist;
		}
		nr_events[evsel->idx]++;
		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

out_init:
	err = 0;
	evlist__for_each_entry(evlist, evsel) {
		if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
			pr_debug("expected %d %s events, got %d\n",
				 expected_nr_events[evsel->idx],
				 evsel__name(evsel), nr_events[evsel->idx]);
			err = -1;
			goto out_delete_evlist;
		}
	}

out_delete_evlist:
	evlist__delete(evlist);
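	/*
	 * evlist__delete() already put the cpu and thread maps, so clear the
	 * local pointers to make the puts below no-ops.
	 */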
	cpus = NULL;
	threads = NULL;
out_free_cpus:
	perf_cpu_map__put(cpus);
out_free_threads:
	perf_thread_map__put(threads);
	return err;
}