xref: /OK3568_Linux_fs/kernel/tools/perf/tests/sw-clock.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>
#include <linux/string.h>

#include "tests.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/cpumap.h"
#include "util/mmap.h"
#include "util/thread_map.h"
#include <perf/evlist.h>
#include <perf/mmap.h>

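/*
 * Iteration count for the busy loop used below to keep the CPU busy while
 * the software clock events are counting.
 */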
#define NR_LOOPS  10000000

/*
 * This test opens the software clock events (cpu-clock, task-clock) and
 * then checks that their frequency -> period conversion does not leave
 * the artifact of forcing the period to 1.
 */
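/*
 * In frequency mode (attr.freq = 1) the kernel derives the sample period
 * from the requested sample_freq at runtime; if that conversion were
 * broken, every sample would carry a period of exactly 1, which is what
 * the final check below detects.
 */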
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct evsel *evsel;
	struct evlist *evlist;
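	/*
	 * PERF_SAMPLE_PERIOD makes each sample record carry the period in
	 * effect when it was taken, .disabled defers counting until
	 * evlist__enable() and .freq = 1 selects frequency mode instead of
	 * a fixed period.
	 */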
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = clock_id,
		.sample_type = PERF_SAMPLE_PERIOD,
		.exclude_kernel = 1,
		.disabled = 1,
		.freq = 1,
	};
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	struct mmap *md;

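	/*
	 * 500 Hz is low enough that opening should not normally hit the
	 * perf_event_max_sample_rate limit; the open-failure path below
	 * points at that sysctl as a hint.
	 */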
	attr.sample_freq = 500;

	evlist = evlist__new();
	if (evlist == NULL) {
		pr_debug("evlist__new\n");
		return -1;
	}

	evsel = evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("evsel__new\n");
		goto out_delete_evlist;
	}
	evlist__add(evlist, evsel);

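	/*
	 * Count the current process only: a dummy CPU map plus a thread map
	 * holding getpid() makes the events follow this thread rather than
	 * a particular CPU.
	 */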
	cpus = perf_cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(&evlist->core, cpus, threads);

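	/*
	 * The maps are now referenced through the evlist; clear the local
	 * pointers so the out_free_maps path does not drop them a second
	 * time on this function's behalf.
	 */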
	cpus	= NULL;
	threads = NULL;

	if (evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}

	err = evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	evlist__enable(evlist);

	/* collect samples */
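	/* tmp is volatile so the compiler cannot optimize the loop away. */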
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	evlist__disable(evlist);

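	/*
	 * Walk the single mmap'd ring buffer, summing the period reported in
	 * every PERF_RECORD_SAMPLE; other record types are just consumed.
	 */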
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(&md->core) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Error during parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_mmap__consume(&md->core);
	}
	perf_mmap__read_done(&md->core);

out_init:
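	/*
	 * If the frequency -> period conversion were broken, every sample
	 * would have a period of 1 and the sum of the periods would equal
	 * the number of samples.
	 */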
	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
out_delete_evlist:
	evlist__delete(evlist);
	return err;
}

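/*
 * Exercise both software clock events; the test fails if either the
 * cpu-clock or the task-clock check fails.
 */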
int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}