// SPDX-License-Identifier: GPL-2.0
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "target.h"
#include "thread_map.h"
#include "tests.h"
#include "util/mmap.h"

#include <errno.h>
#include <signal.h>
#include <linux/string.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

/*
 * Both flags are written from asynchronous signal handlers (sig_handler and
 * workload_exec_failed_signal below).  The C standard only guarantees that
 * objects of type "volatile sig_atomic_t" may be safely accessed from a
 * signal handler (CERT SIG31-C), so declare them as such.  On Linux
 * sig_atomic_t is an int, so the existing %d format and arithmetic on
 * nr_exit remain valid.
 */
static volatile sig_atomic_t exited;
static volatile sig_atomic_t nr_exit;
19*4882a593Smuzhiyun
/* SIGCHLD handler: note that the forked workload has exited. */
static void sig_handler(int sig __maybe_unused)
{
	exited = 1;
}
24*4882a593Smuzhiyun
/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
 * we asked by setting its exec_error to this handler.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info __maybe_unused,
					void *ucontext __maybe_unused)
{
	/*
	 * Stop the retry loop in test__task_exit (exited) and force its
	 * final "nr_exit != 1" check to fail the test (nr_exit = -1).
	 */
	exited = 1;
	nr_exit = -1;
}
36*4882a593Smuzhiyun
/*
 * This test will start a workload that does nothing then it checks
 * if the number of exit event reported by the kernel is 1 or not
 * in order to check the kernel returns correct number of event.
 *
 * Returns 0 on success, negative on any setup failure or on a wrong
 * PERF_RECORD_EXIT count.
 */
int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = -1;
	union perf_event *event;
	struct evsel *evsel;
	struct evlist *evlist;
	struct target target = {
		.uid = UINT_MAX,
		.uses_mmap = true,
	};
	const char *argv[] = { "true", NULL };
	char sbuf[STRERR_BUFSIZE];
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	struct mmap *md;
	int retry_count = 0;

	/* SIGCHLD (via sig_handler) tells us when the forked child is gone. */
	signal(SIGCHLD, sig_handler);

	evlist = perf_evlist__new_default();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new_default\n");
		return -1;
	}

	/*
	 * Create maps of threads and cpus to monitor. In this case
	 * we start with all threads and cpus (-1, -1) but then in
	 * perf_evlist__prepare_workload we'll fill in the only thread
	 * we're monitoring, the one forked there.
	 */
	cpus = perf_cpu_map__dummy_new();
	threads = thread_map__new_by_tid(-1);
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/*
	 * Ownership of the maps moved to the evlist; clear the local
	 * references so the puts at out_free_maps become no-ops on the
	 * later error paths.
	 */
	cpus = NULL;
	threads = NULL;

	/*
	 * Fork the workload; it stays stopped until
	 * perf_evlist__start_workload() below.  A failed exec raises
	 * SIGUSR1, delivered to workload_exec_failed_signal.
	 */
	err = perf_evlist__prepare_workload(evlist, &target, argv, false,
					    workload_exec_failed_signal);
	if (err < 0) {
		pr_debug("Couldn't run the workload!\n");
		goto out_delete_evlist;
	}

	evsel = evlist__first(evlist);
	/* Ask the kernel for task events (PERF_RECORD_EXIT among them). */
	evsel->core.attr.task = 1;
#ifdef __s390x__
	/* NOTE(review): s390x needs a much higher rate here — confirm why. */
	evsel->core.attr.sample_freq = 1000000;
#else
	evsel->core.attr.sample_freq = 1;
#endif
	evsel->core.attr.inherit = 0;
	evsel->core.attr.watermark = 0;
	/* Wake the poll below on every single event. */
	evsel->core.attr.wakeup_events = 1;
	evsel->core.attr.exclude_kernel = 1;

	err = evlist__open(evlist);
	if (err < 0) {
		pr_debug("Couldn't open the evlist: %s\n",
			 str_error_r(-err, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	if (evlist__mmap(evlist, 128) < 0) {
		pr_debug("failed to mmap events: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		err = -1;
		goto out_delete_evlist;
	}

	/* Release the child so it execs "true" and exits. */
	perf_evlist__start_workload(evlist);

retry:
	/* Drain the single mmap'ed ring buffer, counting EXIT records. */
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		if (event->header.type == PERF_RECORD_EXIT)
			nr_exit++;

		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

out_init:
	/*
	 * Keep waiting until SIGCHLD has fired AND at least one EXIT record
	 * was seen, bailing out after ~1000 retries.  A failed exec also
	 * terminates the loop: it sets exited = 1 and nr_exit = -1, which
	 * then fails the nr_exit != 1 check below.
	 */
	if (!exited || !nr_exit) {
		evlist__poll(evlist, -1);

		if (retry_count++ > 1000) {
			pr_debug("Failed after retrying 1000 times\n");
			err = -1;
			goto out_free_maps;
		}

		goto retry;
	}

	/* A single-threaded child must produce exactly one EXIT record. */
	if (nr_exit != 1) {
		pr_debug("received %d EXIT records\n", nr_exit);
		err = -1;
	}

out_free_maps:
	/* cpus/threads are NULL unless we failed before the set_maps above. */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
out_delete_evlist:
	evlist__delete(evlist);
	return err;
}
159