/* xref: /OK3568_Linux_fs/kernel/tools/perf/util/synthetic-events.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0-only

#include "util/debug.h"
#include "util/dso.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include "util/cgroup.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <asm/bug.h>
#include <perf/evsel.h>
#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <api/io.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contains
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(bf, sizeof(bf), "/proc/%d/status", pid);

	fd = open(bf, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		char *nl;

		name = skip_spaces(name + 5);  /* strlen("Name:") */
		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

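/*
 * Fill in a PERF_RECORD_COMM event for @pid: the comm string is read from
 * /proc (on the host) and the header size is padded to a u64 boundary plus
 * the machine's id_hdr_size so that sample-id fields can follow.
 */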
static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				(sizeof(event->comm.comm) - size) +
				machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[128];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
			(NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
			machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to ppid from the status file.
	 * For other threads, set the parent pid to the main thread, i.e.
	 * assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid  = tgid;
	event->fork.tid  = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

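/*
 * Parse one line of a /proc/<pid>/maps file, e.g.
 *   00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 * into its start/end addresses, protection and sharing flags, file offset,
 * device numbers, inode and (possibly empty) pathname.  Returns false if
 * the line does not match the expected layout.
 */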
static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
				u32 *prot, u32 *flags, __u64 *offset,
				u32 *maj, u32 *min,
				__u64 *inode,
				ssize_t pathname_size, char *pathname)
{
	__u64 temp;
	int ch;
	char *start_pathname = pathname;

	if (io__get_hex(io, start) != '-')
		return false;
	if (io__get_hex(io, end) != ' ')
		return false;

	/* map protection and flags bits */
	*prot = 0;
	ch = io__get_char(io);
	if (ch == 'r')
		*prot |= PROT_READ;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'w')
		*prot |= PROT_WRITE;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'x')
		*prot |= PROT_EXEC;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 's')
		*flags = MAP_SHARED;
	else if (ch == 'p')
		*flags = MAP_PRIVATE;
	else
		return false;
	if (io__get_char(io) != ' ')
		return false;

	if (io__get_hex(io, offset) != ' ')
		return false;

	if (io__get_hex(io, &temp) != ':')
		return false;
	*maj = temp;
	if (io__get_hex(io, &temp) != ' ')
		return false;
	*min = temp;

	ch = io__get_dec(io, inode);
	if (ch != ' ') {
		*pathname = '\0';
		return ch == '\n';
	}
	do {
		ch = io__get_char(io);
	} while (ch == ' ');
	while (true) {
		if (ch < 0)
			return false;
		if (ch == '\0' || ch == '\n' ||
		    (pathname + 1 - start_pathname) >= pathname_size) {
			*pathname = '\0';
			return true;
		}
		*pathname++ = ch;
		ch = io__get_char(io);
	}
}

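/*
 * Synthesize PERF_RECORD_MMAP2 events for every mapping found in
 * /proc/<pid>/task/<pid>/maps.  Non-executable mappings are only emitted
 * when @mmap_data is set, anonymous and hugetlbfs mappings get the "//anon"
 * filename, and parsing is abandoned (with a truncation flag on the last
 * event) once it exceeds proc_map_timeout milliseconds.
 */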
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	unsigned long long t;
	char bf[BUFSIZ];
	struct io io;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
		machine->root_dir, pid, pid);

	io.fd = open(bf, O_RDONLY, 0);
	if (io.fd < 0) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}
	io__init(&io, io.fd, bf, sizeof(bf));

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (!io.eof) {
		static const char anonstr[] = "//anon";
		size_t size, aligned_size;

		/* ensure null termination since stack will be reused. */
		event->mmap2.filename[0] = '\0';

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		if (!read_proc_maps_line(&io,
					&event->mmap2.start,
					&event->mmap2.len,
					&event->mmap2.prot,
					&event->mmap2.flags,
					&event->mmap2.pgoff,
					&event->mmap2.maj,
					&event->mmap2.min,
					&event->mmap2.ino,
					sizeof(event->mmap2.filename),
					event->mmap2.filename))
			continue;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s/proc/%d/task/%d/maps timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   machine->root_dir, pid, pid);
			truncation = true;
			goto out;
		}

		event->mmap2.ino_generation = 0;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if ((event->mmap2.prot & PROT_EXEC) == 0) {
			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(event->mmap2.filename, ""))
			strcpy(event->mmap2.filename, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
			     hugetlbfs_mnt_len)) {
			strcpy(event->mmap2.filename, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(event->mmap2.filename) + 1;
		aligned_size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					(sizeof(event->mmap2.filename) - aligned_size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
			(aligned_size - size));
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	close(io.fd);
	return rc;
}

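/*
 * Cgroup events carry the 64-bit cgroup id obtained via name_to_handle_at(),
 * so they can only be synthesized when the libc provides struct file_handle.
 */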
#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
					 union perf_event *event,
					 char *path, size_t mount_len,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
	size_t path_len = strlen(path) - mount_len + 1;
	struct {
		struct file_handle fh;
		uint64_t cgroup_id;
	} handle;
	int mount_id;

	while (path_len % sizeof(u64))
		path[mount_len + path_len++] = '\0';

	memset(&event->cgroup, 0, event_size);

	event->cgroup.header.type = PERF_RECORD_CGROUP;
	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
		pr_debug("stat failed: %s\n", path);
		return -1;
	}

	event->cgroup.id = handle.cgroup_id;
	strncpy(event->cgroup.path, path + mount_len, path_len);
	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
		pr_debug("process synth event failed\n");
		return -1;
	}

	return 0;
}

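/*
 * Recursively descend the cgroup hierarchy under @path (rooted at the mount
 * point, of length @mount_len) and synthesize one PERF_RECORD_CGROUP event
 * per directory.
 */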
static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
					union perf_event *event,
					char *path, size_t mount_len,
					perf_event__handler_t process,
					struct machine *machine)
{
	size_t pos = strlen(path);
	DIR *d;
	struct dirent *dent;
	int ret = 0;

	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
					  process, machine) < 0)
		return -1;

	d = opendir(path);
	if (d == NULL) {
		pr_debug("failed to open directory: %s\n", path);
		return -1;
	}

	while ((dent = readdir(d)) != NULL) {
		if (dent->d_type != DT_DIR)
			continue;
		if (!strcmp(dent->d_name, ".") ||
		    !strcmp(dent->d_name, ".."))
			continue;

		/* any sane path should be less than PATH_MAX */
		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
			continue;

		if (path[pos - 1] != '/')
			strcat(path, "/");
		strcat(path, dent->d_name);

		ret = perf_event__walk_cgroup_tree(tool, event, path,
						   mount_len, process, machine);
		if (ret < 0)
			break;

		path[pos] = '\0';
	}

	closedir(d);
	return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	union perf_event event;
	char cgrp_root[PATH_MAX];
	size_t mount_len;  /* length of mount point in the path */

	if (!tool || !tool->cgroup_events)
		return 0;

	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
		pr_debug("cannot find cgroup mount point\n");
		return -1;
	}

	mount_len = strlen(cgrp_root);
	/* make sure the path starts with a slash (after mount point) */
	strcat(cgrp_root, "/");

	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
					 process, machine) < 0)
		return -1;

	return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
				   perf_event__handler_t process __maybe_unused,
				   struct machine *machine __maybe_unused)
{
	return -1;
}
#endif

int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct maps *maps = machine__kernel_maps(machine);
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	maps__for_each_entry(maps, pos) {
		size_t size;

		if (!__map__is_kmodule(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				        (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

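/*
 * Synthesize the events describing one thread.  With @full unset, only comm,
 * namespaces and (for the group leader) mmap events are emitted for @pid;
 * with @full set, every task under /proc/<pid>/task gets comm, fork and
 * namespaces events, plus mmap events for the group leader.
 */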
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full, perf_event__handler_t process,
				      struct perf_tool *tool, struct machine *machine, bool mmap_data)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_maps()
		 */
		if (pid == tgid &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct perf_thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       perf_thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

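/*
 * Synthesize events for the @num /proc entries starting at index @start in
 * the pre-scanned @dirent array.  Failures for individual pids are ignored,
 * since a thread may legitimately exit while we iterate.
 */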
static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

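/* Per-worker arguments for the multithreaded /proc scan below. */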
struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool mmap_data;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine, args->mmap_data,
					 args->dirent,
					 args->start, args->num);
	return NULL;
}

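/*
 * Synthesize events for every task found under /proc.  The scanned directory
 * entries are split as evenly as possible across @nr_threads_synthesize
 * worker threads (capped at the number of entries, or at the online CPU
 * count when UINT_MAX is passed).
 */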
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, 0, alphasort);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine, mmap_data,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(sizeof(pthread_t), thread_nr);
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(sizeof(*args), thread_nr);
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].mmap_data = mmap_data;
		args[i].dirent = dirent;
	}
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return err;
}

int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}

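/*
 * Synthesize the PERF_RECORD_MMAP event describing the kernel text mapping,
 * using the kallsyms reference relocation symbol to fill in the page offset.
 */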
static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
						perf_event__handler_t process,
						struct machine *machine)
{
	size_t size;
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (map == NULL)
		return -1;

	kmap = map__kmap(map);
	if (!kmap->ref_reloc_sym)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and after it is available use this as a
	 * fallback for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int err;

	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				      struct perf_thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
		char *comm = perf_thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = perf_thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

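/*
 * A cpu map can be encoded either as a plain array of cpu numbers or as a
 * bitmask; the two helpers below fill in each representation.
 */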
static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct perf_cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct perf_record_record_cpu_map *mask,
			    struct perf_cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct perf_cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct perf_cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct perf_record_record_cpu_map) + BITS_TO_LONGS(*max) * sizeof(long);
}

cpu_map_data__alloc(struct perf_cpu_map * map,size_t * size,u16 * type,int * max)1141*4882a593Smuzhiyun void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max)
1142*4882a593Smuzhiyun {
1143*4882a593Smuzhiyun 	size_t size_cpus, size_mask;
1144*4882a593Smuzhiyun 	bool is_dummy = perf_cpu_map__empty(map);
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	/*
1147*4882a593Smuzhiyun 	 * Both array and mask data have variable size based
1148*4882a593Smuzhiyun 	 * on the number of cpus and their actual values.
1149*4882a593Smuzhiyun 	 * The size of the 'struct perf_record_cpu_map_data' is:
1150*4882a593Smuzhiyun 	 *
1151*4882a593Smuzhiyun 	 *   array = size of 'struct cpu_map_entries' +
1152*4882a593Smuzhiyun 	 *           number of cpus * sizeof(u16)
1153*4882a593Smuzhiyun 	 *
1154*4882a593Smuzhiyun 	 *   mask  = size of 'struct perf_record_record_cpu_map' +
1155*4882a593Smuzhiyun 	 *           maximum cpu bit converted to size of longs
1156*4882a593Smuzhiyun 	 *
1157*4882a593Smuzhiyun 	 * and finally + the size of 'struct perf_record_cpu_map_data'.
1158*4882a593Smuzhiyun 	 */
1159*4882a593Smuzhiyun 	size_cpus = cpus_size(map);
1160*4882a593Smuzhiyun 	size_mask = mask_size(map, max);
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	if (is_dummy || (size_cpus < size_mask)) {
1163*4882a593Smuzhiyun 		*size += size_cpus;
1164*4882a593Smuzhiyun 		*type  = PERF_CPU_MAP__CPUS;
1165*4882a593Smuzhiyun 	} else {
1166*4882a593Smuzhiyun 		*size += size_mask;
1167*4882a593Smuzhiyun 		*type  = PERF_CPU_MAP__MASK;
1168*4882a593Smuzhiyun 	}
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	*size += sizeof(struct perf_record_cpu_map_data);
1171*4882a593Smuzhiyun 	*size = PERF_ALIGN(*size, sizeof(u64));
1172*4882a593Smuzhiyun 	return zalloc(*size);
1173*4882a593Smuzhiyun }
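
/*
 * A rough worked example of the choice made above, assuming a map holding
 * the four CPUs {0, 1, 2, 3} (sizes left symbolic, they depend on the
 * struct definitions):
 *
 *   size_cpus = sizeof(struct cpu_map_entries) + 4 * sizeof(u16)
 *   size_mask = sizeof(struct perf_record_record_cpu_map) +
 *               BITS_TO_LONGS(3 + 1) * sizeof(long)
 *
 * The smaller of the two picks PERF_CPU_MAP__CPUS vs PERF_CPU_MAP__MASK
 * (an empty "dummy" map always takes the CPUS form), and
 * sizeof(struct perf_record_cpu_map_data) plus u64 alignment is added on
 * top before zalloc().
 */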
1174*4882a593Smuzhiyun 
cpu_map_data__synthesize(struct perf_record_cpu_map_data * data,struct perf_cpu_map * map,u16 type,int max)1175*4882a593Smuzhiyun void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map,
1176*4882a593Smuzhiyun 			      u16 type, int max)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun 	data->type = type;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	switch (type) {
1181*4882a593Smuzhiyun 	case PERF_CPU_MAP__CPUS:
1182*4882a593Smuzhiyun 		synthesize_cpus((struct cpu_map_entries *) data->data, map);
1183*4882a593Smuzhiyun 		break;
1184*4882a593Smuzhiyun 	case PERF_CPU_MAP__MASK:
1185*4882a593Smuzhiyun 		synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max);
1186*4882a593Smuzhiyun 	default:
1187*4882a593Smuzhiyun 		break;
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
cpu_map_event__new(struct perf_cpu_map * map)1191*4882a593Smuzhiyun static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map)
1192*4882a593Smuzhiyun {
1193*4882a593Smuzhiyun 	size_t size = sizeof(struct perf_record_cpu_map);
1194*4882a593Smuzhiyun 	struct perf_record_cpu_map *event;
1195*4882a593Smuzhiyun 	int max;
1196*4882a593Smuzhiyun 	u16 type;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	event = cpu_map_data__alloc(map, &size, &type, &max);
1199*4882a593Smuzhiyun 	if (!event)
1200*4882a593Smuzhiyun 		return NULL;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	event->header.type = PERF_RECORD_CPU_MAP;
1203*4882a593Smuzhiyun 	event->header.size = size;
1204*4882a593Smuzhiyun 	event->data.type   = type;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	cpu_map_data__synthesize(&event->data, map, type, max);
1207*4882a593Smuzhiyun 	return event;
1208*4882a593Smuzhiyun }
1209*4882a593Smuzhiyun 
perf_event__synthesize_cpu_map(struct perf_tool * tool,struct perf_cpu_map * map,perf_event__handler_t process,struct machine * machine)1210*4882a593Smuzhiyun int perf_event__synthesize_cpu_map(struct perf_tool *tool,
1211*4882a593Smuzhiyun 				   struct perf_cpu_map *map,
1212*4882a593Smuzhiyun 				   perf_event__handler_t process,
1213*4882a593Smuzhiyun 				   struct machine *machine)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun 	struct perf_record_cpu_map *event;
1216*4882a593Smuzhiyun 	int err;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	event = cpu_map_event__new(map);
1219*4882a593Smuzhiyun 	if (!event)
1220*4882a593Smuzhiyun 		return -ENOMEM;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	err = process(tool, (union perf_event *) event, NULL, machine);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	free(event);
1225*4882a593Smuzhiyun 	return err;
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun 
perf_event__synthesize_stat_config(struct perf_tool * tool,struct perf_stat_config * config,perf_event__handler_t process,struct machine * machine)1228*4882a593Smuzhiyun int perf_event__synthesize_stat_config(struct perf_tool *tool,
1229*4882a593Smuzhiyun 				       struct perf_stat_config *config,
1230*4882a593Smuzhiyun 				       perf_event__handler_t process,
1231*4882a593Smuzhiyun 				       struct machine *machine)
1232*4882a593Smuzhiyun {
1233*4882a593Smuzhiyun 	struct perf_record_stat_config *event;
1234*4882a593Smuzhiyun 	int size, i = 0, err;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	size  = sizeof(*event);
1237*4882a593Smuzhiyun 	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	event = zalloc(size);
1240*4882a593Smuzhiyun 	if (!event)
1241*4882a593Smuzhiyun 		return -ENOMEM;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	event->header.type = PERF_RECORD_STAT_CONFIG;
1244*4882a593Smuzhiyun 	event->header.size = size;
1245*4882a593Smuzhiyun 	event->nr          = PERF_STAT_CONFIG_TERM__MAX;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun #define ADD(__term, __val)					\
1248*4882a593Smuzhiyun 	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
1249*4882a593Smuzhiyun 	event->data[i].val = __val;				\
1250*4882a593Smuzhiyun 	i++;
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	ADD(AGGR_MODE,	config->aggr_mode)
1253*4882a593Smuzhiyun 	ADD(INTERVAL,	config->interval)
1254*4882a593Smuzhiyun 	ADD(SCALE,	config->scale)
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
1257*4882a593Smuzhiyun 		  "stat config terms unbalanced\n");
1258*4882a593Smuzhiyun #undef ADD
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	err = process(tool, (union perf_event *) event, NULL, machine);
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	free(event);
1263*4882a593Smuzhiyun 	return err;
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun 
perf_event__synthesize_stat(struct perf_tool * tool,u32 cpu,u32 thread,u64 id,struct perf_counts_values * count,perf_event__handler_t process,struct machine * machine)1266*4882a593Smuzhiyun int perf_event__synthesize_stat(struct perf_tool *tool,
1267*4882a593Smuzhiyun 				u32 cpu, u32 thread, u64 id,
1268*4882a593Smuzhiyun 				struct perf_counts_values *count,
1269*4882a593Smuzhiyun 				perf_event__handler_t process,
1270*4882a593Smuzhiyun 				struct machine *machine)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun 	struct perf_record_stat event;
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 	event.header.type = PERF_RECORD_STAT;
1275*4882a593Smuzhiyun 	event.header.size = sizeof(event);
1276*4882a593Smuzhiyun 	event.header.misc = 0;
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	event.id        = id;
1279*4882a593Smuzhiyun 	event.cpu       = cpu;
1280*4882a593Smuzhiyun 	event.thread    = thread;
1281*4882a593Smuzhiyun 	event.val       = count->val;
1282*4882a593Smuzhiyun 	event.ena       = count->ena;
1283*4882a593Smuzhiyun 	event.run       = count->run;
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	return process(tool, (union perf_event *) &event, NULL, machine);
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun 
perf_event__synthesize_stat_round(struct perf_tool * tool,u64 evtime,u64 type,perf_event__handler_t process,struct machine * machine)1288*4882a593Smuzhiyun int perf_event__synthesize_stat_round(struct perf_tool *tool,
1289*4882a593Smuzhiyun 				      u64 evtime, u64 type,
1290*4882a593Smuzhiyun 				      perf_event__handler_t process,
1291*4882a593Smuzhiyun 				      struct machine *machine)
1292*4882a593Smuzhiyun {
1293*4882a593Smuzhiyun 	struct perf_record_stat_round event;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	event.header.type = PERF_RECORD_STAT_ROUND;
1296*4882a593Smuzhiyun 	event.header.size = sizeof(event);
1297*4882a593Smuzhiyun 	event.header.misc = 0;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	event.time = evtime;
1300*4882a593Smuzhiyun 	event.type = type;
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	return process(tool, (union perf_event *) &event, NULL, machine);
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun 
perf_event__sample_event_size(const struct perf_sample * sample,u64 type,u64 read_format)1305*4882a593Smuzhiyun size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
1306*4882a593Smuzhiyun {
1307*4882a593Smuzhiyun 	size_t sz, result = sizeof(struct perf_record_sample);
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_IDENTIFIER)
1310*4882a593Smuzhiyun 		result += sizeof(u64);
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_IP)
1313*4882a593Smuzhiyun 		result += sizeof(u64);
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_TID)
1316*4882a593Smuzhiyun 		result += sizeof(u64);
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_TIME)
1319*4882a593Smuzhiyun 		result += sizeof(u64);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_ADDR)
1322*4882a593Smuzhiyun 		result += sizeof(u64);
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_ID)
1325*4882a593Smuzhiyun 		result += sizeof(u64);
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_STREAM_ID)
1328*4882a593Smuzhiyun 		result += sizeof(u64);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_CPU)
1331*4882a593Smuzhiyun 		result += sizeof(u64);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_PERIOD)
1334*4882a593Smuzhiyun 		result += sizeof(u64);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_READ) {
1337*4882a593Smuzhiyun 		result += sizeof(u64);
1338*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1339*4882a593Smuzhiyun 			result += sizeof(u64);
1340*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1341*4882a593Smuzhiyun 			result += sizeof(u64);
1342*4882a593Smuzhiyun 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1343*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_GROUP) {
1344*4882a593Smuzhiyun 			sz = sample->read.group.nr *
1345*4882a593Smuzhiyun 			     sizeof(struct sample_read_value);
1346*4882a593Smuzhiyun 			result += sz;
1347*4882a593Smuzhiyun 		} else {
1348*4882a593Smuzhiyun 			result += sizeof(u64);
1349*4882a593Smuzhiyun 		}
1350*4882a593Smuzhiyun 	}
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_CALLCHAIN) {
1353*4882a593Smuzhiyun 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1354*4882a593Smuzhiyun 		result += sz;
1355*4882a593Smuzhiyun 	}
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_RAW) {
1358*4882a593Smuzhiyun 		result += sizeof(u32);
1359*4882a593Smuzhiyun 		result += sample->raw_size;
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1363*4882a593Smuzhiyun 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1364*4882a593Smuzhiyun 		/* nr, hw_idx */
1365*4882a593Smuzhiyun 		sz += 2 * sizeof(u64);
1366*4882a593Smuzhiyun 		result += sz;
1367*4882a593Smuzhiyun 	}
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_REGS_USER) {
1370*4882a593Smuzhiyun 		if (sample->user_regs.abi) {
1371*4882a593Smuzhiyun 			result += sizeof(u64);
1372*4882a593Smuzhiyun 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1373*4882a593Smuzhiyun 			result += sz;
1374*4882a593Smuzhiyun 		} else {
1375*4882a593Smuzhiyun 			result += sizeof(u64);
1376*4882a593Smuzhiyun 		}
1377*4882a593Smuzhiyun 	}
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_STACK_USER) {
1380*4882a593Smuzhiyun 		sz = sample->user_stack.size;
1381*4882a593Smuzhiyun 		result += sizeof(u64);
1382*4882a593Smuzhiyun 		if (sz) {
1383*4882a593Smuzhiyun 			result += sz;
1384*4882a593Smuzhiyun 			result += sizeof(u64);
1385*4882a593Smuzhiyun 		}
1386*4882a593Smuzhiyun 	}
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_WEIGHT)
1389*4882a593Smuzhiyun 		result += sizeof(u64);
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_DATA_SRC)
1392*4882a593Smuzhiyun 		result += sizeof(u64);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_TRANSACTION)
1395*4882a593Smuzhiyun 		result += sizeof(u64);
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_REGS_INTR) {
1398*4882a593Smuzhiyun 		if (sample->intr_regs.abi) {
1399*4882a593Smuzhiyun 			result += sizeof(u64);
1400*4882a593Smuzhiyun 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1401*4882a593Smuzhiyun 			result += sz;
1402*4882a593Smuzhiyun 		} else {
1403*4882a593Smuzhiyun 			result += sizeof(u64);
1404*4882a593Smuzhiyun 		}
1405*4882a593Smuzhiyun 	}
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_PHYS_ADDR)
1408*4882a593Smuzhiyun 		result += sizeof(u64);
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_CGROUP)
1411*4882a593Smuzhiyun 		result += sizeof(u64);
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_AUX) {
1414*4882a593Smuzhiyun 		result += sizeof(u64);
1415*4882a593Smuzhiyun 		result += sample->aux_sample.size;
1416*4882a593Smuzhiyun 	}
1417*4882a593Smuzhiyun 
1418*4882a593Smuzhiyun 	return result;
1419*4882a593Smuzhiyun }
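
/*
 * Illustrative arithmetic only: with sample_type = PERF_SAMPLE_IP |
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME and nothing else set, the result is
 *
 *   sizeof(struct perf_record_sample) + 3 * sizeof(u64)
 *
 * since each of those bits contributes one u64 to the sample body.  The
 * variable-length parts (callchain, raw data, branch stack, user stack,
 * AUX area) are sized from the fields of *sample itself, so the returned
 * value is what perf_event__synthesize_sample() below needs room for.
 */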
1420*4882a593Smuzhiyun 
perf_event__synthesize_sample(union perf_event * event,u64 type,u64 read_format,const struct perf_sample * sample)1421*4882a593Smuzhiyun int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
1422*4882a593Smuzhiyun 				  const struct perf_sample *sample)
1423*4882a593Smuzhiyun {
1424*4882a593Smuzhiyun 	__u64 *array;
1425*4882a593Smuzhiyun 	size_t sz;
1426*4882a593Smuzhiyun 	/*
1427*4882a593Smuzhiyun 	 * used for cross-endian analysis. See git commit 65014ab3
1428*4882a593Smuzhiyun 	 * for why this goofiness is needed.
1429*4882a593Smuzhiyun 	 */
1430*4882a593Smuzhiyun 	union u64_swap u;
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	array = event->sample.array;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_IDENTIFIER) {
1435*4882a593Smuzhiyun 		*array = sample->id;
1436*4882a593Smuzhiyun 		array++;
1437*4882a593Smuzhiyun 	}
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_IP) {
1440*4882a593Smuzhiyun 		*array = sample->ip;
1441*4882a593Smuzhiyun 		array++;
1442*4882a593Smuzhiyun 	}
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_TID) {
1445*4882a593Smuzhiyun 		u.val32[0] = sample->pid;
1446*4882a593Smuzhiyun 		u.val32[1] = sample->tid;
1447*4882a593Smuzhiyun 		*array = u.val64;
1448*4882a593Smuzhiyun 		array++;
1449*4882a593Smuzhiyun 	}
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_TIME) {
1452*4882a593Smuzhiyun 		*array = sample->time;
1453*4882a593Smuzhiyun 		array++;
1454*4882a593Smuzhiyun 	}
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_ADDR) {
1457*4882a593Smuzhiyun 		*array = sample->addr;
1458*4882a593Smuzhiyun 		array++;
1459*4882a593Smuzhiyun 	}
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_ID) {
1462*4882a593Smuzhiyun 		*array = sample->id;
1463*4882a593Smuzhiyun 		array++;
1464*4882a593Smuzhiyun 	}
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_STREAM_ID) {
1467*4882a593Smuzhiyun 		*array = sample->stream_id;
1468*4882a593Smuzhiyun 		array++;
1469*4882a593Smuzhiyun 	}
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_CPU) {
1472*4882a593Smuzhiyun 		u.val32[0] = sample->cpu;
1473*4882a593Smuzhiyun 		u.val32[1] = 0;
1474*4882a593Smuzhiyun 		*array = u.val64;
1475*4882a593Smuzhiyun 		array++;
1476*4882a593Smuzhiyun 	}
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_PERIOD) {
1479*4882a593Smuzhiyun 		*array = sample->period;
1480*4882a593Smuzhiyun 		array++;
1481*4882a593Smuzhiyun 	}
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_READ) {
1484*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_GROUP)
1485*4882a593Smuzhiyun 			*array = sample->read.group.nr;
1486*4882a593Smuzhiyun 		else
1487*4882a593Smuzhiyun 			*array = sample->read.one.value;
1488*4882a593Smuzhiyun 		array++;
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1491*4882a593Smuzhiyun 			*array = sample->read.time_enabled;
1492*4882a593Smuzhiyun 			array++;
1493*4882a593Smuzhiyun 		}
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1496*4882a593Smuzhiyun 			*array = sample->read.time_running;
1497*4882a593Smuzhiyun 			array++;
1498*4882a593Smuzhiyun 		}
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1501*4882a593Smuzhiyun 		if (read_format & PERF_FORMAT_GROUP) {
1502*4882a593Smuzhiyun 			sz = sample->read.group.nr *
1503*4882a593Smuzhiyun 			     sizeof(struct sample_read_value);
1504*4882a593Smuzhiyun 			memcpy(array, sample->read.group.values, sz);
1505*4882a593Smuzhiyun 			array = (void *)array + sz;
1506*4882a593Smuzhiyun 		} else {
1507*4882a593Smuzhiyun 			*array = sample->read.one.id;
1508*4882a593Smuzhiyun 			array++;
1509*4882a593Smuzhiyun 		}
1510*4882a593Smuzhiyun 	}
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_CALLCHAIN) {
1513*4882a593Smuzhiyun 		sz = (sample->callchain->nr + 1) * sizeof(u64);
1514*4882a593Smuzhiyun 		memcpy(array, sample->callchain, sz);
1515*4882a593Smuzhiyun 		array = (void *)array + sz;
1516*4882a593Smuzhiyun 	}
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_RAW) {
1519*4882a593Smuzhiyun 		u.val32[0] = sample->raw_size;
1520*4882a593Smuzhiyun 		*array = u.val64;
1521*4882a593Smuzhiyun 		array = (void *)array + sizeof(u32);
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 		memcpy(array, sample->raw_data, sample->raw_size);
1524*4882a593Smuzhiyun 		array = (void *)array + sample->raw_size;
1525*4882a593Smuzhiyun 	}
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_BRANCH_STACK) {
1528*4882a593Smuzhiyun 		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
1529*4882a593Smuzhiyun 		/* nr, hw_idx */
1530*4882a593Smuzhiyun 		sz += 2 * sizeof(u64);
1531*4882a593Smuzhiyun 		memcpy(array, sample->branch_stack, sz);
1532*4882a593Smuzhiyun 		array = (void *)array + sz;
1533*4882a593Smuzhiyun 	}
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_REGS_USER) {
1536*4882a593Smuzhiyun 		if (sample->user_regs.abi) {
1537*4882a593Smuzhiyun 			*array++ = sample->user_regs.abi;
1538*4882a593Smuzhiyun 			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
1539*4882a593Smuzhiyun 			memcpy(array, sample->user_regs.regs, sz);
1540*4882a593Smuzhiyun 			array = (void *)array + sz;
1541*4882a593Smuzhiyun 		} else {
1542*4882a593Smuzhiyun 			*array++ = 0;
1543*4882a593Smuzhiyun 		}
1544*4882a593Smuzhiyun 	}
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_STACK_USER) {
1547*4882a593Smuzhiyun 		sz = sample->user_stack.size;
1548*4882a593Smuzhiyun 		*array++ = sz;
1549*4882a593Smuzhiyun 		if (sz) {
1550*4882a593Smuzhiyun 			memcpy(array, sample->user_stack.data, sz);
1551*4882a593Smuzhiyun 			array = (void *)array + sz;
1552*4882a593Smuzhiyun 			*array++ = sz;
1553*4882a593Smuzhiyun 		}
1554*4882a593Smuzhiyun 	}
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_WEIGHT) {
1557*4882a593Smuzhiyun 		*array = sample->weight;
1558*4882a593Smuzhiyun 		array++;
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_DATA_SRC) {
1562*4882a593Smuzhiyun 		*array = sample->data_src;
1563*4882a593Smuzhiyun 		array++;
1564*4882a593Smuzhiyun 	}
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_TRANSACTION) {
1567*4882a593Smuzhiyun 		*array = sample->transaction;
1568*4882a593Smuzhiyun 		array++;
1569*4882a593Smuzhiyun 	}
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_REGS_INTR) {
1572*4882a593Smuzhiyun 		if (sample->intr_regs.abi) {
1573*4882a593Smuzhiyun 			*array++ = sample->intr_regs.abi;
1574*4882a593Smuzhiyun 			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
1575*4882a593Smuzhiyun 			memcpy(array, sample->intr_regs.regs, sz);
1576*4882a593Smuzhiyun 			array = (void *)array + sz;
1577*4882a593Smuzhiyun 		} else {
1578*4882a593Smuzhiyun 			*array++ = 0;
1579*4882a593Smuzhiyun 		}
1580*4882a593Smuzhiyun 	}
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_PHYS_ADDR) {
1583*4882a593Smuzhiyun 		*array = sample->phys_addr;
1584*4882a593Smuzhiyun 		array++;
1585*4882a593Smuzhiyun 	}
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_CGROUP) {
1588*4882a593Smuzhiyun 		*array = sample->cgroup;
1589*4882a593Smuzhiyun 		array++;
1590*4882a593Smuzhiyun 	}
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	if (type & PERF_SAMPLE_AUX) {
1593*4882a593Smuzhiyun 		sz = sample->aux_sample.size;
1594*4882a593Smuzhiyun 		*array++ = sz;
1595*4882a593Smuzhiyun 		memcpy(array, sample->aux_sample.data, sz);
1596*4882a593Smuzhiyun 		array = (void *)array + sz;
1597*4882a593Smuzhiyun 	}
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 	return 0;
1600*4882a593Smuzhiyun }
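
/*
 * A minimal usage sketch, assuming the caller already knows the evsel's
 * sample_type and read_format; the helper name below is made up for
 * illustration and kept under #if 0.  The idea: size the record with
 * perf_event__sample_event_size(), allocate it, fill the header, then let
 * perf_event__synthesize_sample() write the array part.
 */
#if 0
static union perf_event *sample__synthesize_event(const struct perf_sample *sample,
						  u64 sample_type, u64 read_format)
{
	size_t sz = perf_event__sample_event_size(sample, sample_type, read_format);
	union perf_event *event = zalloc(sz);

	if (!event)
		return NULL;

	/* the header is not touched by perf_event__synthesize_sample() */
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = sample->cpumode;
	event->header.size = sz;

	if (perf_event__synthesize_sample(event, sample_type, read_format, sample)) {
		free(event);
		return NULL;
	}

	return event;
}
#endif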
1601*4882a593Smuzhiyun 
perf_event__synthesize_id_index(struct perf_tool * tool,perf_event__handler_t process,struct evlist * evlist,struct machine * machine)1602*4882a593Smuzhiyun int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
1603*4882a593Smuzhiyun 				    struct evlist *evlist, struct machine *machine)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	union perf_event *ev;
1606*4882a593Smuzhiyun 	struct evsel *evsel;
1607*4882a593Smuzhiyun 	size_t nr = 0, i = 0, sz, max_nr, n;
1608*4882a593Smuzhiyun 	int err;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	pr_debug2("Synthesizing id index\n");
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) /
1613*4882a593Smuzhiyun 		 sizeof(struct id_index_entry);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	evlist__for_each_entry(evlist, evsel)
1616*4882a593Smuzhiyun 		nr += evsel->core.ids;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	n = nr > max_nr ? max_nr : nr;
1619*4882a593Smuzhiyun 	sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry);
1620*4882a593Smuzhiyun 	ev = zalloc(sz);
1621*4882a593Smuzhiyun 	if (!ev)
1622*4882a593Smuzhiyun 		return -ENOMEM;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
1625*4882a593Smuzhiyun 	ev->id_index.header.size = sz;
1626*4882a593Smuzhiyun 	ev->id_index.nr = n;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	evlist__for_each_entry(evlist, evsel) {
1629*4882a593Smuzhiyun 		u32 j;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 		for (j = 0; j < evsel->core.ids; j++) {
1632*4882a593Smuzhiyun 			struct id_index_entry *e;
1633*4882a593Smuzhiyun 			struct perf_sample_id *sid;
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 			if (i >= n) {
1636*4882a593Smuzhiyun 				err = process(tool, ev, NULL, machine);
1637*4882a593Smuzhiyun 				if (err)
1638*4882a593Smuzhiyun 					goto out_err;
1639*4882a593Smuzhiyun 				nr -= n;
1640*4882a593Smuzhiyun 				i = 0;
1641*4882a593Smuzhiyun 			}
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 			e = &ev->id_index.entries[i++];
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 			e->id = evsel->core.id[j];
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 			sid = perf_evlist__id2sid(evlist, e->id);
1648*4882a593Smuzhiyun 			if (!sid) {
1649*4882a593Smuzhiyun 				free(ev);
1650*4882a593Smuzhiyun 				return -ENOENT;
1651*4882a593Smuzhiyun 			}
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 			e->idx = sid->idx;
1654*4882a593Smuzhiyun 			e->cpu = sid->cpu;
1655*4882a593Smuzhiyun 			e->tid = sid->tid;
1656*4882a593Smuzhiyun 		}
1657*4882a593Smuzhiyun 	}
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry);
1660*4882a593Smuzhiyun 	ev->id_index.header.size = sz;
1661*4882a593Smuzhiyun 	ev->id_index.nr = nr;
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	err = process(tool, ev, NULL, machine);
1664*4882a593Smuzhiyun out_err:
1665*4882a593Smuzhiyun 	free(ev);
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	return err;
1668*4882a593Smuzhiyun }
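
/*
 * Note on the loop above: the buffer holds at most max_nr entries (so the
 * u16 header.size cannot overflow), and whenever it fills up it is flushed
 * through process() and reused.  A large evlist may therefore be emitted
 * as several PERF_RECORD_ID_INDEX events, with the final, possibly
 * partial, chunk sent after the loop.
 */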
1669*4882a593Smuzhiyun 
__machine__synthesize_threads(struct machine * machine,struct perf_tool * tool,struct target * target,struct perf_thread_map * threads,perf_event__handler_t process,bool data_mmap,unsigned int nr_threads_synthesize)1670*4882a593Smuzhiyun int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
1671*4882a593Smuzhiyun 				  struct target *target, struct perf_thread_map *threads,
1672*4882a593Smuzhiyun 				  perf_event__handler_t process, bool data_mmap,
1673*4882a593Smuzhiyun 				  unsigned int nr_threads_synthesize)
1674*4882a593Smuzhiyun {
1675*4882a593Smuzhiyun 	if (target__has_task(target))
1676*4882a593Smuzhiyun 		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
1677*4882a593Smuzhiyun 	else if (target__has_cpu(target))
1678*4882a593Smuzhiyun 		return perf_event__synthesize_threads(tool, process,
1679*4882a593Smuzhiyun 						      machine, data_mmap,
1680*4882a593Smuzhiyun 						      nr_threads_synthesize);
1681*4882a593Smuzhiyun 	/* command specified */
1682*4882a593Smuzhiyun 	return 0;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun 
machine__synthesize_threads(struct machine * machine,struct target * target,struct perf_thread_map * threads,bool data_mmap,unsigned int nr_threads_synthesize)1685*4882a593Smuzhiyun int machine__synthesize_threads(struct machine *machine, struct target *target,
1686*4882a593Smuzhiyun 				struct perf_thread_map *threads, bool data_mmap,
1687*4882a593Smuzhiyun 				unsigned int nr_threads_synthesize)
1688*4882a593Smuzhiyun {
1689*4882a593Smuzhiyun 	return __machine__synthesize_threads(machine, NULL, target, threads,
1690*4882a593Smuzhiyun 					     perf_event__process, data_mmap,
1691*4882a593Smuzhiyun 					     nr_threads_synthesize);
1692*4882a593Smuzhiyun }
1693*4882a593Smuzhiyun 
event_update_event__new(size_t size,u64 type,u64 id)1694*4882a593Smuzhiyun static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
1695*4882a593Smuzhiyun {
1696*4882a593Smuzhiyun 	struct perf_record_event_update *ev;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	size += sizeof(*ev);
1699*4882a593Smuzhiyun 	size  = PERF_ALIGN(size, sizeof(u64));
1700*4882a593Smuzhiyun 
1701*4882a593Smuzhiyun 	ev = zalloc(size);
1702*4882a593Smuzhiyun 	if (ev) {
1703*4882a593Smuzhiyun 		ev->header.type = PERF_RECORD_EVENT_UPDATE;
1704*4882a593Smuzhiyun 		ev->header.size = (u16)size;
1705*4882a593Smuzhiyun 		ev->type	= type;
1706*4882a593Smuzhiyun 		ev->id		= id;
1707*4882a593Smuzhiyun 	}
1708*4882a593Smuzhiyun 	return ev;
1709*4882a593Smuzhiyun }
1710*4882a593Smuzhiyun 
perf_event__synthesize_event_update_unit(struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)1711*4882a593Smuzhiyun int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
1712*4882a593Smuzhiyun 					     perf_event__handler_t process)
1713*4882a593Smuzhiyun {
1714*4882a593Smuzhiyun 	size_t size = strlen(evsel->unit);
1715*4882a593Smuzhiyun 	struct perf_record_event_update *ev;
1716*4882a593Smuzhiyun 	int err;
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
1719*4882a593Smuzhiyun 	if (ev == NULL)
1720*4882a593Smuzhiyun 		return -ENOMEM;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	strlcpy(ev->data, evsel->unit, size + 1);
1723*4882a593Smuzhiyun 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1724*4882a593Smuzhiyun 	free(ev);
1725*4882a593Smuzhiyun 	return err;
1726*4882a593Smuzhiyun }
1727*4882a593Smuzhiyun 
perf_event__synthesize_event_update_scale(struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)1728*4882a593Smuzhiyun int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
1729*4882a593Smuzhiyun 					      perf_event__handler_t process)
1730*4882a593Smuzhiyun {
1731*4882a593Smuzhiyun 	struct perf_record_event_update *ev;
1732*4882a593Smuzhiyun 	struct perf_record_event_update_scale *ev_data;
1733*4882a593Smuzhiyun 	int err;
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
1736*4882a593Smuzhiyun 	if (ev == NULL)
1737*4882a593Smuzhiyun 		return -ENOMEM;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	ev_data = (struct perf_record_event_update_scale *)ev->data;
1740*4882a593Smuzhiyun 	ev_data->scale = evsel->scale;
1741*4882a593Smuzhiyun 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1742*4882a593Smuzhiyun 	free(ev);
1743*4882a593Smuzhiyun 	return err;
1744*4882a593Smuzhiyun }
1745*4882a593Smuzhiyun 
perf_event__synthesize_event_update_name(struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)1746*4882a593Smuzhiyun int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
1747*4882a593Smuzhiyun 					     perf_event__handler_t process)
1748*4882a593Smuzhiyun {
1749*4882a593Smuzhiyun 	struct perf_record_event_update *ev;
1750*4882a593Smuzhiyun 	size_t len = strlen(evsel->name);
1751*4882a593Smuzhiyun 	int err;
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
1754*4882a593Smuzhiyun 	if (ev == NULL)
1755*4882a593Smuzhiyun 		return -ENOMEM;
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 	strlcpy(ev->data, evsel->name, len + 1);
1758*4882a593Smuzhiyun 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1759*4882a593Smuzhiyun 	free(ev);
1760*4882a593Smuzhiyun 	return err;
1761*4882a593Smuzhiyun }
1762*4882a593Smuzhiyun 
perf_event__synthesize_event_update_cpus(struct perf_tool * tool,struct evsel * evsel,perf_event__handler_t process)1763*4882a593Smuzhiyun int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
1764*4882a593Smuzhiyun 					     perf_event__handler_t process)
1765*4882a593Smuzhiyun {
1766*4882a593Smuzhiyun 	size_t size = sizeof(struct perf_record_event_update);
1767*4882a593Smuzhiyun 	struct perf_record_event_update *ev;
1768*4882a593Smuzhiyun 	int max, err;
1769*4882a593Smuzhiyun 	u16 type;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	if (!evsel->core.own_cpus)
1772*4882a593Smuzhiyun 		return 0;
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun 	ev = cpu_map_data__alloc(evsel->core.own_cpus, &size, &type, &max);
1775*4882a593Smuzhiyun 	if (!ev)
1776*4882a593Smuzhiyun 		return -ENOMEM;
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 	ev->header.type = PERF_RECORD_EVENT_UPDATE;
1779*4882a593Smuzhiyun 	ev->header.size = (u16)size;
1780*4882a593Smuzhiyun 	ev->type	= PERF_EVENT_UPDATE__CPUS;
1781*4882a593Smuzhiyun 	ev->id		= evsel->core.id[0];
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun 	cpu_map_data__synthesize((struct perf_record_cpu_map_data *)ev->data,
1784*4882a593Smuzhiyun 				 evsel->core.own_cpus, type, max);
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	err = process(tool, (union perf_event *)ev, NULL, NULL);
1787*4882a593Smuzhiyun 	free(ev);
1788*4882a593Smuzhiyun 	return err;
1789*4882a593Smuzhiyun }
1790*4882a593Smuzhiyun 
perf_event__synthesize_attrs(struct perf_tool * tool,struct evlist * evlist,perf_event__handler_t process)1791*4882a593Smuzhiyun int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
1792*4882a593Smuzhiyun 				 perf_event__handler_t process)
1793*4882a593Smuzhiyun {
1794*4882a593Smuzhiyun 	struct evsel *evsel;
1795*4882a593Smuzhiyun 	int err = 0;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	evlist__for_each_entry(evlist, evsel) {
1798*4882a593Smuzhiyun 		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
1799*4882a593Smuzhiyun 						  evsel->core.id, process);
1800*4882a593Smuzhiyun 		if (err) {
1801*4882a593Smuzhiyun 			pr_debug("failed to create perf header attribute\n");
1802*4882a593Smuzhiyun 			return err;
1803*4882a593Smuzhiyun 		}
1804*4882a593Smuzhiyun 	}
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	return err;
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun 
has_unit(struct evsel * evsel)1809*4882a593Smuzhiyun static bool has_unit(struct evsel *evsel)
1810*4882a593Smuzhiyun {
1811*4882a593Smuzhiyun 	return evsel->unit && *evsel->unit;
1812*4882a593Smuzhiyun }
1813*4882a593Smuzhiyun 
has_scale(struct evsel * evsel)1814*4882a593Smuzhiyun static bool has_scale(struct evsel *evsel)
1815*4882a593Smuzhiyun {
1816*4882a593Smuzhiyun 	return evsel->scale != 1;
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun 
perf_event__synthesize_extra_attr(struct perf_tool * tool,struct evlist * evsel_list,perf_event__handler_t process,bool is_pipe)1819*4882a593Smuzhiyun int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
1820*4882a593Smuzhiyun 				      perf_event__handler_t process, bool is_pipe)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun 	struct evsel *evsel;
1823*4882a593Smuzhiyun 	int err;
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 	/*
1826*4882a593Smuzhiyun 	 * Synthesize other event details not carried within the
1827*4882a593Smuzhiyun 	 * attr event - unit, scale, name
1828*4882a593Smuzhiyun 	 */
1829*4882a593Smuzhiyun 	evlist__for_each_entry(evsel_list, evsel) {
1830*4882a593Smuzhiyun 		if (!evsel->supported)
1831*4882a593Smuzhiyun 			continue;
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 		/*
1834*4882a593Smuzhiyun 		 * Synthesize unit and scale only if they are defined.
1835*4882a593Smuzhiyun 		 */
1836*4882a593Smuzhiyun 		if (has_unit(evsel)) {
1837*4882a593Smuzhiyun 			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
1838*4882a593Smuzhiyun 			if (err < 0) {
1839*4882a593Smuzhiyun 				pr_err("Couldn't synthesize evsel unit.\n");
1840*4882a593Smuzhiyun 				return err;
1841*4882a593Smuzhiyun 			}
1842*4882a593Smuzhiyun 		}
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 		if (has_scale(evsel)) {
1845*4882a593Smuzhiyun 			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
1846*4882a593Smuzhiyun 			if (err < 0) {
1847*4882a593Smuzhiyun 				pr_err("Couldn't synthesize evsel scale.\n");
1848*4882a593Smuzhiyun 				return err;
1849*4882a593Smuzhiyun 			}
1850*4882a593Smuzhiyun 		}
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 		if (evsel->core.own_cpus) {
1853*4882a593Smuzhiyun 			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
1854*4882a593Smuzhiyun 			if (err < 0) {
1855*4882a593Smuzhiyun 				pr_err("Couldn't synthesize evsel cpus.\n");
1856*4882a593Smuzhiyun 				return err;
1857*4882a593Smuzhiyun 			}
1858*4882a593Smuzhiyun 		}
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 		/*
1861*4882a593Smuzhiyun 		 * Name is needed only for pipe output,
1862*4882a593Smuzhiyun 		 * perf.data carries event names.
1863*4882a593Smuzhiyun 		 */
1864*4882a593Smuzhiyun 		if (is_pipe) {
1865*4882a593Smuzhiyun 			err = perf_event__synthesize_event_update_name(tool, evsel, process);
1866*4882a593Smuzhiyun 			if (err < 0) {
1867*4882a593Smuzhiyun 				pr_err("Couldn't synthesize evsel name.\n");
1868*4882a593Smuzhiyun 				return err;
1869*4882a593Smuzhiyun 			}
1870*4882a593Smuzhiyun 		}
1871*4882a593Smuzhiyun 	}
1872*4882a593Smuzhiyun 	return 0;
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun 
perf_event__synthesize_attr(struct perf_tool * tool,struct perf_event_attr * attr,u32 ids,u64 * id,perf_event__handler_t process)1875*4882a593Smuzhiyun int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
1876*4882a593Smuzhiyun 				u32 ids, u64 *id, perf_event__handler_t process)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun 	union perf_event *ev;
1879*4882a593Smuzhiyun 	size_t size;
1880*4882a593Smuzhiyun 	int err;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	size = sizeof(struct perf_event_attr);
1883*4882a593Smuzhiyun 	size = PERF_ALIGN(size, sizeof(u64));
1884*4882a593Smuzhiyun 	size += sizeof(struct perf_event_header);
1885*4882a593Smuzhiyun 	size += ids * sizeof(u64);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	ev = zalloc(size);
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 	if (ev == NULL)
1890*4882a593Smuzhiyun 		return -ENOMEM;
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun 	ev->attr.attr = *attr;
1893*4882a593Smuzhiyun 	memcpy(ev->attr.id, id, ids * sizeof(u64));
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
1896*4882a593Smuzhiyun 	ev->attr.header.size = (u16)size;
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 	if (ev->attr.header.size == size)
1899*4882a593Smuzhiyun 		err = process(tool, ev, NULL, NULL);
1900*4882a593Smuzhiyun 	else
1901*4882a593Smuzhiyun 		err = -E2BIG;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	free(ev);
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	return err;
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun 
perf_event__synthesize_tracing_data(struct perf_tool * tool,int fd,struct evlist * evlist,perf_event__handler_t process)1908*4882a593Smuzhiyun int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
1909*4882a593Smuzhiyun 					perf_event__handler_t process)
1910*4882a593Smuzhiyun {
1911*4882a593Smuzhiyun 	union perf_event ev;
1912*4882a593Smuzhiyun 	struct tracing_data *tdata;
1913*4882a593Smuzhiyun 	ssize_t size = 0, aligned_size = 0, padding;
1914*4882a593Smuzhiyun 	struct feat_fd ff;
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	/*
1917*4882a593Smuzhiyun 	 * We are going to store the size of the data followed
1918*4882a593Smuzhiyun 	 * by the data contents. Since the fd is a pipe,
1919*4882a593Smuzhiyun 	 * we cannot seek back to store the size of the data once
1920*4882a593Smuzhiyun 	 * we know it. Instead we:
1921*4882a593Smuzhiyun 	 *
1922*4882a593Smuzhiyun 	 * - write the tracing data to the temp file
1923*4882a593Smuzhiyun 	 * - get/write the data size to pipe
1924*4882a593Smuzhiyun 	 * - write the tracing data from the temp file
1925*4882a593Smuzhiyun 	 *   to the pipe
1926*4882a593Smuzhiyun 	 */
1927*4882a593Smuzhiyun 	tdata = tracing_data_get(&evlist->core.entries, fd, true);
1928*4882a593Smuzhiyun 	if (!tdata)
1929*4882a593Smuzhiyun 		return -1;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	memset(&ev, 0, sizeof(ev));
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1934*4882a593Smuzhiyun 	size = tdata->size;
1935*4882a593Smuzhiyun 	aligned_size = PERF_ALIGN(size, sizeof(u64));
1936*4882a593Smuzhiyun 	padding = aligned_size - size;
1937*4882a593Smuzhiyun 	ev.tracing_data.header.size = sizeof(ev.tracing_data);
1938*4882a593Smuzhiyun 	ev.tracing_data.size = aligned_size;
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun 	process(tool, &ev, NULL, NULL);
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	/*
1943*4882a593Smuzhiyun 	 * The put function will copy all the tracing data
1944*4882a593Smuzhiyun 	 * stored in temp file to the pipe.
1945*4882a593Smuzhiyun 	 */
1946*4882a593Smuzhiyun 	tracing_data_put(tdata);
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	ff = (struct feat_fd){ .fd = fd };
1949*4882a593Smuzhiyun 	if (write_padded(&ff, NULL, 0, padding))
1950*4882a593Smuzhiyun 		return -1;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	return aligned_size;
1953*4882a593Smuzhiyun }
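
/*
 * The resulting byte stream on the pipe, as implied by the steps above (an
 * informal sketch, not a definitive format description):
 *
 *   [ struct perf_record_header_tracing_data, .size = aligned_size ]
 *   [ tdata->size bytes of tracing data copied from the temp file  ]
 *   [ 'padding' zero bytes so the payload totals aligned_size      ]
 *
 * where aligned_size is tdata->size rounded up to a multiple of
 * sizeof(u64).
 */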
1954*4882a593Smuzhiyun 
perf_event__synthesize_build_id(struct perf_tool * tool,struct dso * pos,u16 misc,perf_event__handler_t process,struct machine * machine)1955*4882a593Smuzhiyun int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
1956*4882a593Smuzhiyun 				    perf_event__handler_t process, struct machine *machine)
1957*4882a593Smuzhiyun {
1958*4882a593Smuzhiyun 	union perf_event ev;
1959*4882a593Smuzhiyun 	size_t len;
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 	if (!pos->hit)
1962*4882a593Smuzhiyun 		return 0;
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 	memset(&ev, 0, sizeof(ev));
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	len = pos->long_name_len + 1;
1967*4882a593Smuzhiyun 	len = PERF_ALIGN(len, NAME_ALIGN);
1968*4882a593Smuzhiyun 	memcpy(&ev.build_id.build_id, pos->bid.data, sizeof(pos->bid.data));
1969*4882a593Smuzhiyun 	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
1970*4882a593Smuzhiyun 	ev.build_id.header.misc = misc;
1971*4882a593Smuzhiyun 	ev.build_id.pid = machine->pid;
1972*4882a593Smuzhiyun 	ev.build_id.header.size = sizeof(ev.build_id) + len;
1973*4882a593Smuzhiyun 	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	return process(tool, &ev, NULL, machine);
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun 
perf_event__synthesize_stat_events(struct perf_stat_config * config,struct perf_tool * tool,struct evlist * evlist,perf_event__handler_t process,bool attrs)1978*4882a593Smuzhiyun int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
1979*4882a593Smuzhiyun 				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
1980*4882a593Smuzhiyun {
1981*4882a593Smuzhiyun 	int err;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	if (attrs) {
1984*4882a593Smuzhiyun 		err = perf_event__synthesize_attrs(tool, evlist, process);
1985*4882a593Smuzhiyun 		if (err < 0) {
1986*4882a593Smuzhiyun 			pr_err("Couldn't synthesize attrs.\n");
1987*4882a593Smuzhiyun 			return err;
1988*4882a593Smuzhiyun 		}
1989*4882a593Smuzhiyun 	}
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
1992*4882a593Smuzhiyun 	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
1993*4882a593Smuzhiyun 	if (err < 0) {
1994*4882a593Smuzhiyun 		pr_err("Couldn't synthesize thread map.\n");
1995*4882a593Smuzhiyun 		return err;
1996*4882a593Smuzhiyun 	}
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 	err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
1999*4882a593Smuzhiyun 	if (err < 0) {
2000*4882a593Smuzhiyun 		pr_err("Couldn't synthesize cpu map.\n");
2001*4882a593Smuzhiyun 		return err;
2002*4882a593Smuzhiyun 	}
2003*4882a593Smuzhiyun 
2004*4882a593Smuzhiyun 	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
2005*4882a593Smuzhiyun 	if (err < 0) {
2006*4882a593Smuzhiyun 		pr_err("Couldn't synthesize config.\n");
2007*4882a593Smuzhiyun 		return err;
2008*4882a593Smuzhiyun 	}
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun 	return 0;
2011*4882a593Smuzhiyun }
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];
2014*4882a593Smuzhiyun 
perf_event__synthesize_features(struct perf_tool * tool,struct perf_session * session,struct evlist * evlist,perf_event__handler_t process)2015*4882a593Smuzhiyun int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
2016*4882a593Smuzhiyun 				    struct evlist *evlist, perf_event__handler_t process)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun 	struct perf_header *header = &session->header;
2019*4882a593Smuzhiyun 	struct perf_record_header_feature *fe;
2020*4882a593Smuzhiyun 	struct feat_fd ff;
2021*4882a593Smuzhiyun 	size_t sz, sz_hdr;
2022*4882a593Smuzhiyun 	int feat, ret;
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	sz_hdr = sizeof(fe->header);
2025*4882a593Smuzhiyun 	sz = sizeof(union perf_event);
2026*4882a593Smuzhiyun 	/* get a nice alignment */
2027*4882a593Smuzhiyun 	sz = PERF_ALIGN(sz, page_size);
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	memset(&ff, 0, sizeof(ff));
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 	ff.buf = malloc(sz);
2032*4882a593Smuzhiyun 	if (!ff.buf)
2033*4882a593Smuzhiyun 		return -ENOMEM;
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun 	ff.size = sz - sz_hdr;
2036*4882a593Smuzhiyun 	ff.ph = &session->header;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
2039*4882a593Smuzhiyun 		if (!feat_ops[feat].synthesize) {
2040*4882a593Smuzhiyun 			pr_debug("No record header feature for header: %d\n", feat);
2041*4882a593Smuzhiyun 			continue;
2042*4882a593Smuzhiyun 		}
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 		ff.offset = sizeof(*fe);
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 		ret = feat_ops[feat].write(&ff, evlist);
2047*4882a593Smuzhiyun 		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
2048*4882a593Smuzhiyun 			pr_debug("Error writing feature\n");
2049*4882a593Smuzhiyun 			continue;
2050*4882a593Smuzhiyun 		}
2051*4882a593Smuzhiyun 		/* ff.buf may have changed due to realloc in do_write() */
2052*4882a593Smuzhiyun 		fe = ff.buf;
2053*4882a593Smuzhiyun 		memset(fe, 0, sizeof(*fe));
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 		fe->feat_id = feat;
2056*4882a593Smuzhiyun 		fe->header.type = PERF_RECORD_HEADER_FEATURE;
2057*4882a593Smuzhiyun 		fe->header.size = ff.offset;
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 		ret = process(tool, ff.buf, NULL, NULL);
2060*4882a593Smuzhiyun 		if (ret) {
2061*4882a593Smuzhiyun 			free(ff.buf);
2062*4882a593Smuzhiyun 			return ret;
2063*4882a593Smuzhiyun 		}
2064*4882a593Smuzhiyun 	}
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 	/* Send HEADER_LAST_FEATURE mark. */
2067*4882a593Smuzhiyun 	fe = ff.buf;
2068*4882a593Smuzhiyun 	fe->feat_id     = HEADER_LAST_FEATURE;
2069*4882a593Smuzhiyun 	fe->header.type = PERF_RECORD_HEADER_FEATURE;
2070*4882a593Smuzhiyun 	fe->header.size = sizeof(*fe);
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	ret = process(tool, ff.buf, NULL, NULL);
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 	free(ff.buf);
2075*4882a593Smuzhiyun 	return ret;
2076*4882a593Smuzhiyun }
2077