// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}
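
/*
 * Usage sketch (illustrative only, not among this file's callers):
 * building and dumping a one-CPU mask while debugging:
 *
 *	struct mmap_cpu_mask mask = { .nbits = cpu__max_cpu() };
 *
 *	mask.bits = bitmap_alloc(mask.nbits);
 *	if (mask.bits) {
 *		set_bit(0, mask.bits);
 *		mmap_cpu_mask__scnprintf(&mask, "example");
 *		bitmap_free(mask.bits);
 *	}
 */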

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_alloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		set_bit(node_index, node_mask);
		/*
		 * maxnode is node_index + 1 + 1 rather than node_index + 1:
		 * the kernel's get_nodes() (mm/mempolicy.c) decrements
		 * maxnode before use, so an extra bit is passed to keep
		 * node_index covered.
		 */
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
				data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
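
/*
 * Equivalent effect via the higher-level libnuma API (sketch only; the
 * code above uses the mbind() wrapper from numaif.h directly):
 *
 *	numa_tonode_memory(data, mmap_len, node_index);
 */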
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m\n");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * An aio_fildes value other than -1 denotes a
			 * started aio write operation on the cblock, so an
			 * explicit record__aio_sync() call is required
			 * before the cblock can be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept on separate per-priority queues,
			 * so adding a new request only walks the shorter
			 * per-priority list. Blocks with index beyond
			 * _SC_AIO_PRIO_DELTA_MAX get priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
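
/*
 * Sketch of how a consumer drives one of the cblocks prepared above
 * (record__aio_push()/record__aio_sync() in builtin-record.c are the
 * real users; output_fd, file_offset, size and idx below are
 * illustrative placeholders):
 *
 *	struct aiocb *cblock = &map->aio.cblocks[idx];
 *
 *	cblock->aio_fildes = output_fd;		// marks the cblock in flight
 *	cblock->aio_buf    = map->aio.data[idx];
 *	cblock->aio_nbytes = size;
 *	cblock->aio_offset = file_offset;
 *	if (aio_write(cblock))			// queued at aio_reqprio
 *		cblock->aio_fildes = -1;	// failed, reusable right away
 */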

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
	}
}

static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);

	return 0;
}
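
/*
 * Illustration (hypothetical 2-node box, CPUs 0-3 on node 0 and CPUs
 * 4-7 on node 1): for PERF_AFFINITY_NODE, a mmap on CPU 5 gets mask
 * 0xf0 (every node 1 CPU); for PERF_AFFINITY_CPU it gets just 0x20.
 */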

int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}
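
/*
 * A push() callback only ever sees one contiguous chunk; the ring
 * wraparound is handled above by splitting the data into at most two
 * calls. Minimal sketch of a callback that streams to a file
 * (record__pushfn() in builtin-record.c is the real counterpart):
 *
 *	static int pushfn(struct mmap *map, void *to, void *buf, size_t size)
 *	{
 *		int fd = *(int *)to;	// 'to' carries caller context
 *
 *		return write(fd, buf, size) == (ssize_t)size ? 0 : -1;
 *	}
 */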