xref: /OK3568_Linux_fs/kernel/tools/lib/perf/mmap.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <errno.h>
#include <string.h>
#include <linux/ring_buffer.h>
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
#include "internal.h"

void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
		     bool overwrite, libperf_unmap_cb_t unmap_cb)
{
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb  = unmap_cb;
	refcount_set(&map->refcnt, 0);
	if (prev)
		prev->next = map;
}

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	return map->mask + 1 + page_size;
}

int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, int cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd  = fd;
	map->cpu = cpu;
	return 0;
}

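/*
 * Example (illustrative sketch only, not part of this file): mapping the
 * ring buffer of an event fd obtained from perf_event_open(2).  The mask
 * follows the layout assumed by perf_mmap__mmap_len(): a power-of-two
 * number of data pages minus one, plus one header page mapped in front of
 * the data.  open_counter() and DATA_PAGES are hypothetical placeholders
 * for the caller's own setup; the evlist layer normally zeroes the map,
 * does this mapping and takes the refcount references for you.
 *
 *	struct perf_mmap map = { 0 };		// zeroed, as the evlist layer does
 *	struct perf_mmap_param mp;
 *	long psize = sysconf(_SC_PAGESIZE);
 *	int fd = open_counter();		// hypothetical perf_event_open() wrapper
 *
 *	perf_mmap__init(&map, NULL, false, NULL);
 *	mp.prot = PROT_READ | PROT_WRITE;	// writable tail for non-overwrite mode
 *	mp.mask = DATA_PAGES * psize - 1;	// DATA_PAGES must be a power of two
 *	if (perf_mmap__mmap(&map, &mp, fd, 0))
 *		return -1;			// errno from mmap() describes the failure
 */
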
void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map && map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map && map->unmap_cb)
		map->unmap_cb(map);
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

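/*
 * Example (sketch, not part of this file): reference counting around an
 * extra user of an already mapped ring buffer.  Each additional consumer
 * pairs perf_mmap__get() with perf_mmap__put(); dropping the last
 * reference makes perf_mmap__put() tear the mapping down via
 * perf_mmap__munmap().
 *
 *	perf_mmap__get(map);	// e.g. hand the map to a helper thread
 *	...			// helper reads events from the map
 *	perf_mmap__put(map);	// the final put unmaps the buffer
 */
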
static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			pr_debug("Finished reading overwrite ring buffer: rewind\n");
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}

		pheader = (struct perf_event_header *)(buf + (evt_head & mask));

		if (pheader->size == 0) {
			pr_debug("Finished reading overwrite ring buffer: get start\n");
			*end = evt_head;
			return 0;
		}

		evt_head += pheader->size;
		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
	}
	WARN_ONCE(1, "Shouldn't get here\n");
	return -1;
}

/*
 * Report the start and end of the available data in the ring buffer.
 */
static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");

			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/*
		 * Backward ring buffer is full. We still have a chance to read
		 * most of the data from it.
		 */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

/*
 * Mandatory for overwrite mode.
 * The direction of overwrite mode is backward.
 * The last perf_mmap__read() will set tail to map->prev.
 * map->prev needs to be corrected to head, which is the end of the next read.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}

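/*
 * Example (sketch, not part of this file): what overwrite mode adds on top
 * of the generic read loop shown at the end of this file.  For an overwrite
 * map, __perf_mmap__read_init() sets start = head and end = map->prev, and
 * the loop itself never advances map->prev; perf_mmap__read_done() is what
 * moves map->prev up to head, so the next perf_mmap__read_init() only
 * reports data written after this pass.  Keeping the buffer quiescent while
 * reading (e.g. pausing the event's output, as tools/perf does) is the
 * caller's job and outside the scope of this file.
 *
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map) == 0) {
 *		while ((event = perf_mmap__read_event(map)) != NULL)
 *			perf_mmap__consume(map);	// a real caller processes event first
 *		perf_mmap__read_done(map);		// map->prev = head
 *	}
 */
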
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = map->event_copy;

			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *)map->event_copy;
		}

		*startp += size;
	}

	return event;
}

/*
 * Read events from the ring buffer one by one.
 * Return one event for each call.
 *
 * Usage:
 * perf_mmap__read_init()
 * while(event = perf_mmap__read_event()) {
 *	//process the event
 *	perf_mmap__consume()
 * }
 * perf_mmap__read_done()
 */
union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ring buffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}
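
/*
 * Example (sketch, not part of this file): the usage pattern documented
 * above, spelled out for a non-overwrite map.  Callers normally obtain the
 * map from the evlist layer (e.g. via perf_evlist__for_each_mmap()) after
 * mapping it there; handle_sample() is a hypothetical consumer.
 *
 *	union perf_event *event;
 *
 *	if (perf_mmap__read_init(map) < 0)
 *		return;					// -EAGAIN (no new data) or -ENOENT (unmapped)
 *	while ((event = perf_mmap__read_event(map)) != NULL) {
 *		if (event->header.type == PERF_RECORD_SAMPLE)
 *			handle_sample(event);		// hypothetical
 *		perf_mmap__consume(map);		// releases the space behind the tail
 *	}
 *	perf_mmap__read_done(map);
 */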