1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <errno.h>
3*4882a593Smuzhiyun #include <stdlib.h>
4*4882a593Smuzhiyun #include <bpf/bpf.h>
5*4882a593Smuzhiyun #include <bpf/btf.h>
6*4882a593Smuzhiyun #include <bpf/libbpf.h>
7*4882a593Smuzhiyun #include <linux/btf.h>
8*4882a593Smuzhiyun #include <linux/err.h>
9*4882a593Smuzhiyun #include <linux/string.h>
10*4882a593Smuzhiyun #include <internal/lib.h>
11*4882a593Smuzhiyun #include <symbol/kallsyms.h>
12*4882a593Smuzhiyun #include "bpf-event.h"
13*4882a593Smuzhiyun #include "debug.h"
14*4882a593Smuzhiyun #include "dso.h"
15*4882a593Smuzhiyun #include "symbol.h"
16*4882a593Smuzhiyun #include "machine.h"
17*4882a593Smuzhiyun #include "env.h"
18*4882a593Smuzhiyun #include "session.h"
19*4882a593Smuzhiyun #include "map.h"
20*4882a593Smuzhiyun #include "evlist.h"
21*4882a593Smuzhiyun #include "record.h"
22*4882a593Smuzhiyun #include "util/synthetic-events.h"
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
25*4882a593Smuzhiyun
/*
 * Render @len bytes of @data as lowercase hex into @buf.
 *
 * Returns the length the full hex string needs (2 * len), mirroring
 * snprintf() semantics: if @size is too small the output is truncated
 * (but always NUL-terminated for size > 0) and the would-be length is
 * still returned.
 */
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
	int ret = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		if ((size_t)ret < size) {
			ret += snprintf(buf + ret, size - ret, "%02x",
					data[i]);
		} else {
			/*
			 * Buffer exhausted: the old code passed
			 * "size - ret" here, which wraps around to a huge
			 * size_t and lets snprintf() write past the end of
			 * @buf. Just account for the two characters this
			 * byte would have needed.
			 */
			ret += 2;
		}
	}
	return ret;
}
35*4882a593Smuzhiyun
/*
 * Handle a PERF_RECORD_BPF_EVENT load record: for every JITed ksym
 * address of the program, mark the covering kernel map's DSO as a BPF
 * program so later symbolization/annotation can look up the saved
 * bpf_prog_info in the env.
 */
static int machine__process_bpf_event_load(struct machine *machine,
					   union perf_event *event,
					   struct perf_sample *sample __maybe_unused)
{
	struct perf_env *env = machine->env;
	struct bpf_prog_info_node *node;
	u64 *ksym_addrs;
	u32 nr_ksyms, idx;
	int prog_id = event->bpf.id;

	/* perf-record, no need to handle bpf-event */
	if (!env)
		return 0;

	node = perf_env__find_bpf_prog_info(env, prog_id);
	if (!node)
		return 0;

	ksym_addrs = (u64 *)(uintptr_t)(node->info_linear->info.jited_ksyms);
	nr_ksyms = node->info_linear->info.nr_jited_ksyms;

	for (idx = 0; idx < nr_ksyms; idx++) {
		struct map *map = maps__find(&machine->kmaps, ksym_addrs[idx]);

		if (!map)
			continue;

		map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
		map->dso->bpf_prog.id = prog_id;
		map->dso->bpf_prog.sub_id = idx;
		map->dso->bpf_prog.env = env;
	}
	return 0;
}
69*4882a593Smuzhiyun
machine__process_bpf(struct machine * machine,union perf_event * event,struct perf_sample * sample)70*4882a593Smuzhiyun int machine__process_bpf(struct machine *machine, union perf_event *event,
71*4882a593Smuzhiyun struct perf_sample *sample)
72*4882a593Smuzhiyun {
73*4882a593Smuzhiyun if (dump_trace)
74*4882a593Smuzhiyun perf_event__fprintf_bpf(event, stdout);
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun switch (event->bpf.type) {
77*4882a593Smuzhiyun case PERF_BPF_EVENT_PROG_LOAD:
78*4882a593Smuzhiyun return machine__process_bpf_event_load(machine, event, sample);
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun case PERF_BPF_EVENT_PROG_UNLOAD:
81*4882a593Smuzhiyun /*
82*4882a593Smuzhiyun * Do not free bpf_prog_info and btf of the program here,
83*4882a593Smuzhiyun * as annotation still need them. They will be freed at
84*4882a593Smuzhiyun * the end of the session.
85*4882a593Smuzhiyun */
86*4882a593Smuzhiyun break;
87*4882a593Smuzhiyun default:
88*4882a593Smuzhiyun pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
89*4882a593Smuzhiyun break;
90*4882a593Smuzhiyun }
91*4882a593Smuzhiyun return 0;
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun
/*
 * Copy the raw BTF data for @btf_id into a freshly allocated btf_node
 * and hand it over to @env.
 *
 * Returns 0 on success, -1 on allocation failure or when @env already
 * holds a node for this id (the duplicate node is freed here).
 */
static int perf_env__fetch_btf(struct perf_env *env,
			       u32 btf_id,
			       struct btf *btf)
{
	struct btf_node *node;
	const void *raw;
	u32 raw_size;

	raw = btf__get_raw_data(btf, &raw_size);

	node = malloc(sizeof(*node) + raw_size);
	if (node == NULL)
		return -1;

	node->id = btf_id;
	node->data_size = raw_size;
	memcpy(node->data, raw, raw_size);

	if (perf_env__insert_btf(env, node))
		return 0;

	/* Insertion failed because of a duplicate. */
	free(node);
	return -1;
}
119*4882a593Smuzhiyun
/*
 * Compose a symbol name "bpf_prog_<tag>[_<suffix>]" for sub program
 * @sub_id of @info into @buf (at most @size bytes).
 *
 * The suffix is chosen as follows:
 *   - with BTF available: the function name taken from the sub
 *     program's func_info record;
 *   - no BTF, single program: the program name from bpf_prog_info,
 *     if the kernel filled one in;
 *   - no BTF, multiple sub programs: the placeholder "F".
 *
 * Returns the number of characters written (snprintf semantics).
 */
static int synthesize_bpf_prog_name(char *buf, int size,
				    struct bpf_prog_info *info,
				    struct btf *btf,
				    u32 sub_id)
{
	u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
	void *func_infos = (void *)(uintptr_t)(info->func_info);
	u32 sub_prog_cnt = info->nr_jited_ksyms;
	const struct bpf_func_info *finfo;
	const char *short_name = NULL;
	const struct btf_type *t;
	int name_len;

	name_len = snprintf(buf, size, "bpf_prog_");
	name_len += snprintf_hex(buf + name_len, size - name_len,
				 prog_tags[sub_id], BPF_TAG_SIZE);
	if (btf) {
		/*
		 * func_info records are func_info_rec_size bytes apart;
		 * the kernel may grow the record, so don't index by type.
		 */
		finfo = func_infos + sub_id * info->func_info_rec_size;
		t = btf__type_by_id(btf, finfo->type_id);
		short_name = btf__name_by_offset(btf, t->name_off);
	} else if (sub_id == 0 && sub_prog_cnt == 1) {
		/* no subprog */
		if (info->name[0])
			short_name = info->name;
	} else
		short_name = "F";
	if (short_name)
		name_len += snprintf(buf + name_len, size - name_len,
				     "_%s", short_name);
	return name_len;
}
151*4882a593Smuzhiyun
152*4882a593Smuzhiyun /*
153*4882a593Smuzhiyun * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
154*4882a593Smuzhiyun * program. One PERF_RECORD_BPF_EVENT is generated for the program. And
155*4882a593Smuzhiyun * one PERF_RECORD_KSYMBOL is generated for each sub program.
156*4882a593Smuzhiyun *
157*4882a593Smuzhiyun * Returns:
158*4882a593Smuzhiyun * 0 for success;
159*4882a593Smuzhiyun * -1 for failures;
160*4882a593Smuzhiyun * -2 for lack of kernel support.
161*4882a593Smuzhiyun */
perf_event__synthesize_one_bpf_prog(struct perf_session * session,perf_event__handler_t process,struct machine * machine,int fd,union perf_event * event,struct record_opts * opts)162*4882a593Smuzhiyun static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
163*4882a593Smuzhiyun perf_event__handler_t process,
164*4882a593Smuzhiyun struct machine *machine,
165*4882a593Smuzhiyun int fd,
166*4882a593Smuzhiyun union perf_event *event,
167*4882a593Smuzhiyun struct record_opts *opts)
168*4882a593Smuzhiyun {
169*4882a593Smuzhiyun struct perf_record_ksymbol *ksymbol_event = &event->ksymbol;
170*4882a593Smuzhiyun struct perf_record_bpf_event *bpf_event = &event->bpf;
171*4882a593Smuzhiyun struct bpf_prog_info_linear *info_linear;
172*4882a593Smuzhiyun struct perf_tool *tool = session->tool;
173*4882a593Smuzhiyun struct bpf_prog_info_node *info_node;
174*4882a593Smuzhiyun struct bpf_prog_info *info;
175*4882a593Smuzhiyun struct btf *btf = NULL;
176*4882a593Smuzhiyun struct perf_env *env;
177*4882a593Smuzhiyun u32 sub_prog_cnt, i;
178*4882a593Smuzhiyun int err = 0;
179*4882a593Smuzhiyun u64 arrays;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun /*
182*4882a593Smuzhiyun * for perf-record and perf-report use header.env;
183*4882a593Smuzhiyun * otherwise, use global perf_env.
184*4882a593Smuzhiyun */
185*4882a593Smuzhiyun env = session->data ? &session->header.env : &perf_env;
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
188*4882a593Smuzhiyun arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
189*4882a593Smuzhiyun arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
190*4882a593Smuzhiyun arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
191*4882a593Smuzhiyun arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
192*4882a593Smuzhiyun arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
193*4882a593Smuzhiyun arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun info_linear = bpf_program__get_prog_info_linear(fd, arrays);
196*4882a593Smuzhiyun if (IS_ERR_OR_NULL(info_linear)) {
197*4882a593Smuzhiyun info_linear = NULL;
198*4882a593Smuzhiyun pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
199*4882a593Smuzhiyun return -1;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
203*4882a593Smuzhiyun pr_debug("%s: the kernel is too old, aborting\n", __func__);
204*4882a593Smuzhiyun return -2;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun info = &info_linear->info;
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun /* number of ksyms, func_lengths, and tags should match */
210*4882a593Smuzhiyun sub_prog_cnt = info->nr_jited_ksyms;
211*4882a593Smuzhiyun if (sub_prog_cnt != info->nr_prog_tags ||
212*4882a593Smuzhiyun sub_prog_cnt != info->nr_jited_func_lens)
213*4882a593Smuzhiyun return -1;
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun /* check BTF func info support */
216*4882a593Smuzhiyun if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
217*4882a593Smuzhiyun /* btf func info number should be same as sub_prog_cnt */
218*4882a593Smuzhiyun if (sub_prog_cnt != info->nr_func_info) {
219*4882a593Smuzhiyun pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
220*4882a593Smuzhiyun err = -1;
221*4882a593Smuzhiyun goto out;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun if (btf__get_from_id(info->btf_id, &btf)) {
224*4882a593Smuzhiyun pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
225*4882a593Smuzhiyun err = -1;
226*4882a593Smuzhiyun btf = NULL;
227*4882a593Smuzhiyun goto out;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun perf_env__fetch_btf(env, info->btf_id, btf);
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun /* Synthesize PERF_RECORD_KSYMBOL */
233*4882a593Smuzhiyun for (i = 0; i < sub_prog_cnt; i++) {
234*4882a593Smuzhiyun __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
235*4882a593Smuzhiyun __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
236*4882a593Smuzhiyun int name_len;
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun *ksymbol_event = (struct perf_record_ksymbol) {
239*4882a593Smuzhiyun .header = {
240*4882a593Smuzhiyun .type = PERF_RECORD_KSYMBOL,
241*4882a593Smuzhiyun .size = offsetof(struct perf_record_ksymbol, name),
242*4882a593Smuzhiyun },
243*4882a593Smuzhiyun .addr = prog_addrs[i],
244*4882a593Smuzhiyun .len = prog_lens[i],
245*4882a593Smuzhiyun .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
246*4882a593Smuzhiyun .flags = 0,
247*4882a593Smuzhiyun };
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun name_len = synthesize_bpf_prog_name(ksymbol_event->name,
250*4882a593Smuzhiyun KSYM_NAME_LEN, info, btf, i);
251*4882a593Smuzhiyun ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
252*4882a593Smuzhiyun sizeof(u64));
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun memset((void *)event + event->header.size, 0, machine->id_hdr_size);
255*4882a593Smuzhiyun event->header.size += machine->id_hdr_size;
256*4882a593Smuzhiyun err = perf_tool__process_synth_event(tool, event,
257*4882a593Smuzhiyun machine, process);
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun if (!opts->no_bpf_event) {
261*4882a593Smuzhiyun /* Synthesize PERF_RECORD_BPF_EVENT */
262*4882a593Smuzhiyun *bpf_event = (struct perf_record_bpf_event) {
263*4882a593Smuzhiyun .header = {
264*4882a593Smuzhiyun .type = PERF_RECORD_BPF_EVENT,
265*4882a593Smuzhiyun .size = sizeof(struct perf_record_bpf_event),
266*4882a593Smuzhiyun },
267*4882a593Smuzhiyun .type = PERF_BPF_EVENT_PROG_LOAD,
268*4882a593Smuzhiyun .flags = 0,
269*4882a593Smuzhiyun .id = info->id,
270*4882a593Smuzhiyun };
271*4882a593Smuzhiyun memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
272*4882a593Smuzhiyun memset((void *)event + event->header.size, 0, machine->id_hdr_size);
273*4882a593Smuzhiyun event->header.size += machine->id_hdr_size;
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /* save bpf_prog_info to env */
276*4882a593Smuzhiyun info_node = malloc(sizeof(struct bpf_prog_info_node));
277*4882a593Smuzhiyun if (!info_node) {
278*4882a593Smuzhiyun err = -1;
279*4882a593Smuzhiyun goto out;
280*4882a593Smuzhiyun }
281*4882a593Smuzhiyun
282*4882a593Smuzhiyun info_node->info_linear = info_linear;
283*4882a593Smuzhiyun perf_env__insert_bpf_prog_info(env, info_node);
284*4882a593Smuzhiyun info_linear = NULL;
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun /*
287*4882a593Smuzhiyun * process after saving bpf_prog_info to env, so that
288*4882a593Smuzhiyun * required information is ready for look up
289*4882a593Smuzhiyun */
290*4882a593Smuzhiyun err = perf_tool__process_synth_event(tool, event,
291*4882a593Smuzhiyun machine, process);
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun out:
295*4882a593Smuzhiyun free(info_linear);
296*4882a593Smuzhiyun free(btf);
297*4882a593Smuzhiyun return err ? -1 : 0;
298*4882a593Smuzhiyun }
299*4882a593Smuzhiyun
/* Context passed through kallsyms__parse() to the per-symbol callback. */
struct kallsyms_parse {
	union perf_event *event;	/* scratch buffer for synthesized records */
	perf_event__handler_t process;	/* consumer of the synthesized events */
	struct machine *machine;
	struct perf_tool *tool;
};
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun static int
process_bpf_image(char * name,u64 addr,struct kallsyms_parse * data)308*4882a593Smuzhiyun process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun struct machine *machine = data->machine;
311*4882a593Smuzhiyun union perf_event *event = data->event;
312*4882a593Smuzhiyun struct perf_record_ksymbol *ksymbol;
313*4882a593Smuzhiyun int len;
314*4882a593Smuzhiyun
315*4882a593Smuzhiyun ksymbol = &event->ksymbol;
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun *ksymbol = (struct perf_record_ksymbol) {
318*4882a593Smuzhiyun .header = {
319*4882a593Smuzhiyun .type = PERF_RECORD_KSYMBOL,
320*4882a593Smuzhiyun .size = offsetof(struct perf_record_ksymbol, name),
321*4882a593Smuzhiyun },
322*4882a593Smuzhiyun .addr = addr,
323*4882a593Smuzhiyun .len = page_size,
324*4882a593Smuzhiyun .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
325*4882a593Smuzhiyun .flags = 0,
326*4882a593Smuzhiyun };
327*4882a593Smuzhiyun
328*4882a593Smuzhiyun len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name);
329*4882a593Smuzhiyun ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64));
330*4882a593Smuzhiyun memset((void *) event + event->header.size, 0, machine->id_hdr_size);
331*4882a593Smuzhiyun event->header.size += machine->id_hdr_size;
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun return perf_tool__process_synth_event(data->tool, event, machine,
334*4882a593Smuzhiyun data->process);
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun static int
kallsyms_process_symbol(void * data,const char * _name,char type __maybe_unused,u64 start)338*4882a593Smuzhiyun kallsyms_process_symbol(void *data, const char *_name,
339*4882a593Smuzhiyun char type __maybe_unused, u64 start)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun char disp[KSYM_NAME_LEN];
342*4882a593Smuzhiyun char *module, *name;
343*4882a593Smuzhiyun unsigned long id;
344*4882a593Smuzhiyun int err = 0;
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun module = strchr(_name, '\t');
347*4882a593Smuzhiyun if (!module)
348*4882a593Smuzhiyun return 0;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun /* We are going after [bpf] module ... */
351*4882a593Smuzhiyun if (strcmp(module + 1, "[bpf]"))
352*4882a593Smuzhiyun return 0;
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun name = memdup(_name, (module - _name) + 1);
355*4882a593Smuzhiyun if (!name)
356*4882a593Smuzhiyun return -ENOMEM;
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun name[module - _name] = 0;
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun /* .. and only for trampolines and dispatchers */
361*4882a593Smuzhiyun if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) ||
362*4882a593Smuzhiyun (sscanf(name, "bpf_dispatcher_%s", disp) == 1))
363*4882a593Smuzhiyun err = process_bpf_image(name, start, data);
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun free(name);
366*4882a593Smuzhiyun return err;
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun
/*
 * Synthesize events for every bpf program currently loaded in the
 * system (walking the prog id space via BPF_PROG_GET_NEXT_ID), then
 * for the bpf trampoline/dispatcher images found in kallsyms.
 *
 * Returns 0 on success or when the kernel lacks support / permission
 * (old kernel, EPERM); -1 on real failures in the program loop.
 * Note: a kallsyms parse failure is only logged, it does not change
 * the return value.
 */
int perf_event__synthesize_bpf_events(struct perf_session *session,
				      perf_event__handler_t process,
				      struct machine *machine,
				      struct record_opts *opts)
{
	const char *kallsyms_filename = "/proc/kallsyms";
	struct kallsyms_parse arg;
	union perf_event *event;
	__u32 id = 0;
	int err;
	int fd;

	/* scratch buffer: bpf event + ksymbol name + sample-id trailer */
	event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size);
	if (!event)
		return -1;

	/* Synthesize all the bpf programs in system. */
	while (true) {
		err = bpf_prog_get_next_id(id, &id);
		if (err) {
			/* ENOENT means we walked past the last id: done */
			if (errno == ENOENT) {
				err = 0;
				break;
			}
			pr_debug("%s: can't get next program: %s%s\n",
				 __func__, strerror(errno),
				 errno == EINVAL ? " -- kernel too old?" : "");
			/* don't report error on old kernel or EPERM */
			err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
			break;
		}
		fd = bpf_prog_get_fd_by_id(id);
		if (fd < 0) {
			/* program may have been unloaded meanwhile; skip */
			pr_debug("%s: failed to get fd for prog_id %u\n",
				 __func__, id);
			continue;
		}

		err = perf_event__synthesize_one_bpf_prog(session, process,
							  machine, fd,
							  event, opts);
		close(fd);
		if (err) {
			/* do not return error for old kernel */
			if (err == -2)
				err = 0;
			break;
		}
	}

	/* Synthesize all the bpf images - trampolines/dispatchers. */
	if (symbol_conf.kallsyms_name != NULL)
		kallsyms_filename = symbol_conf.kallsyms_name;

	arg = (struct kallsyms_parse) {
		.event = event,
		.process = process,
		.machine = machine,
		.tool = session->tool,
	};

	/* best effort: failure here is logged but does not affect err */
	if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) {
		pr_err("%s: failed to synthesize bpf images: %s\n",
		       __func__, strerror(errno));
	}

	free(event);
	return err;
}
438*4882a593Smuzhiyun
/*
 * Fetch bpf_prog_info (and its BTF, if any) for program @id and stash
 * them in @env for later annotation. Best effort: failures are logged
 * at debug level and otherwise ignored, as befits side-band processing.
 */
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info_node *info_node;
	struct btf *btf = NULL;
	u64 arrays;
	u32 btf_id;
	int fd;

	fd = bpf_prog_get_fd_by_id(id);
	if (fd < 0)
		return;

	arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
	arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
	arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
	arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
	arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
	arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

	info_linear = bpf_program__get_prog_info_linear(fd, arrays);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
		goto out;
	}

	btf_id = info_linear->info.btf_id;

	info_node = malloc(sizeof(struct bpf_prog_info_node));
	if (info_node) {
		/* env takes ownership of info_linear */
		info_node->info_linear = info_linear;
		perf_env__insert_bpf_prog_info(env, info_node);
	} else
		free(info_linear);

	if (btf_id == 0)
		goto out;

	if (btf__get_from_id(btf_id, &btf)) {
		pr_debug("%s: failed to get BTF of id %u, aborting\n",
			 __func__, btf_id);
		goto out;
	}
	perf_env__fetch_btf(env, btf_id, btf);

out:
	/*
	 * struct btf owns internal buffers; releasing it with plain
	 * free() (as before) leaked them. btf__free(NULL) is a no-op.
	 */
	btf__free(btf);
	close(fd);
}
489*4882a593Smuzhiyun
bpf_event__sb_cb(union perf_event * event,void * data)490*4882a593Smuzhiyun static int bpf_event__sb_cb(union perf_event *event, void *data)
491*4882a593Smuzhiyun {
492*4882a593Smuzhiyun struct perf_env *env = data;
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun if (event->header.type != PERF_RECORD_BPF_EVENT)
495*4882a593Smuzhiyun return -1;
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun switch (event->bpf.type) {
498*4882a593Smuzhiyun case PERF_BPF_EVENT_PROG_LOAD:
499*4882a593Smuzhiyun perf_env__add_bpf_info(env, event->bpf.id);
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun case PERF_BPF_EVENT_PROG_UNLOAD:
502*4882a593Smuzhiyun /*
503*4882a593Smuzhiyun * Do not free bpf_prog_info and btf of the program here,
504*4882a593Smuzhiyun * as annotation still need them. They will be freed at
505*4882a593Smuzhiyun * the end of the session.
506*4882a593Smuzhiyun */
507*4882a593Smuzhiyun break;
508*4882a593Smuzhiyun default:
509*4882a593Smuzhiyun pr_debug("unexpected bpf event type of %d\n", event->bpf.type);
510*4882a593Smuzhiyun break;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun
513*4882a593Smuzhiyun return 0;
514*4882a593Smuzhiyun }
515*4882a593Smuzhiyun
evlist__add_bpf_sb_event(struct evlist * evlist,struct perf_env * env)516*4882a593Smuzhiyun int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env)
517*4882a593Smuzhiyun {
518*4882a593Smuzhiyun struct perf_event_attr attr = {
519*4882a593Smuzhiyun .type = PERF_TYPE_SOFTWARE,
520*4882a593Smuzhiyun .config = PERF_COUNT_SW_DUMMY,
521*4882a593Smuzhiyun .sample_id_all = 1,
522*4882a593Smuzhiyun .watermark = 1,
523*4882a593Smuzhiyun .bpf_event = 1,
524*4882a593Smuzhiyun .size = sizeof(attr), /* to capture ABI version */
525*4882a593Smuzhiyun };
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun /*
528*4882a593Smuzhiyun * Older gcc versions don't support designated initializers, like above,
529*4882a593Smuzhiyun * for unnamed union members, such as the following:
530*4882a593Smuzhiyun */
531*4882a593Smuzhiyun attr.wakeup_watermark = 1;
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
534*4882a593Smuzhiyun }
535*4882a593Smuzhiyun
bpf_event__print_bpf_prog_info(struct bpf_prog_info * info,struct perf_env * env,FILE * fp)536*4882a593Smuzhiyun void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info,
537*4882a593Smuzhiyun struct perf_env *env,
538*4882a593Smuzhiyun FILE *fp)
539*4882a593Smuzhiyun {
540*4882a593Smuzhiyun __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens);
541*4882a593Smuzhiyun __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
542*4882a593Smuzhiyun char name[KSYM_NAME_LEN];
543*4882a593Smuzhiyun struct btf *btf = NULL;
544*4882a593Smuzhiyun u32 sub_prog_cnt, i;
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun sub_prog_cnt = info->nr_jited_ksyms;
547*4882a593Smuzhiyun if (sub_prog_cnt != info->nr_prog_tags ||
548*4882a593Smuzhiyun sub_prog_cnt != info->nr_jited_func_lens)
549*4882a593Smuzhiyun return;
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun if (info->btf_id) {
552*4882a593Smuzhiyun struct btf_node *node;
553*4882a593Smuzhiyun
554*4882a593Smuzhiyun node = perf_env__find_btf(env, info->btf_id);
555*4882a593Smuzhiyun if (node)
556*4882a593Smuzhiyun btf = btf__new((__u8 *)(node->data),
557*4882a593Smuzhiyun node->data_size);
558*4882a593Smuzhiyun }
559*4882a593Smuzhiyun
560*4882a593Smuzhiyun if (sub_prog_cnt == 1) {
561*4882a593Smuzhiyun synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0);
562*4882a593Smuzhiyun fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n",
563*4882a593Smuzhiyun info->id, name, prog_addrs[0], prog_lens[0]);
564*4882a593Smuzhiyun goto out;
565*4882a593Smuzhiyun }
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun fprintf(fp, "# bpf_prog_info %u:\n", info->id);
568*4882a593Smuzhiyun for (i = 0; i < sub_prog_cnt; i++) {
569*4882a593Smuzhiyun synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i);
570*4882a593Smuzhiyun
571*4882a593Smuzhiyun fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n",
572*4882a593Smuzhiyun i, name, prog_addrs[i], prog_lens[i]);
573*4882a593Smuzhiyun }
574*4882a593Smuzhiyun out:
575*4882a593Smuzhiyun btf__free(btf);
576*4882a593Smuzhiyun }
577