xref: /OK3568_Linux_fs/kernel/tools/testing/selftests/bpf/progs/profiler.inc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright (c) 2020 Facebook */
3*4882a593Smuzhiyun #include <vmlinux.h>
4*4882a593Smuzhiyun #include <bpf/bpf_core_read.h>
5*4882a593Smuzhiyun #include <bpf/bpf_helpers.h>
6*4882a593Smuzhiyun #include <bpf/bpf_tracing.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include "profiler.h"
9*4882a593Smuzhiyun 
#ifndef NULL
#define NULL 0
#endif

/* open(2) flag bits (UAPI values), duplicated here because vmlinux.h
 * carries only BTF-derived types, not macro definitions. */
#define O_WRONLY 00000001
#define O_RDWR 00000002
#define O_DIRECTORY 00200000
#define __O_TMPFILE 020000000
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
/* Largest errno value encodable in a pointer (see IS_ERR_VALUE below). */
#define MAX_ERRNO 4095
/* stat(2) file-type bits and type-test helpers, mirroring <linux/stat.h>. */
#define S_IFMT 00170000
#define S_IFSOCK 0140000
#define S_IFLNK 0120000
#define S_IFREG 0100000
#define S_IFBLK 0060000
#define S_IFDIR 0040000
#define S_IFCHR 0020000
#define S_IFIFO 0010000
#define S_ISUID 0004000
#define S_ISGID 0002000
#define S_ISVTX 0001000
#define S_ISLNK(m) (((m)&S_IFMT) == S_IFLNK)
#define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR)
#define S_ISCHR(m) (((m)&S_IFMT) == S_IFCHR)
#define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK)
#define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO)
#define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK)
/* True when the value lies in the top MAX_ERRNO addresses, i.e. it encodes
 * a negative errno rather than a valid kernel pointer (kernel err.h idiom).
 * Fully parenthesized so the macro composes safely inside larger
 * expressions: the unparenthesized original mis-parsed e.g.
 * `!IS_ERR_VALUE(x)` as `(!x) >= -MAX_ERRNO` due to operator precedence. */
#define IS_ERR_VALUE(x) ((unsigned long)(void*)(x) >= (unsigned long)-MAX_ERRNO)

/* Number of per-sender slots tracked for each kill target pid. */
#define KILL_DATA_ARRAY_SIZE 8
40*4882a593Smuzhiyun 
/* Per-target set of kill-event records: one slot per distinct sender pid
 * (slots are matched/claimed via meta.pid, see get_var_spid_index()). */
struct var_kill_data_arr_t {
	struct var_kill_data_t array[KILL_DATA_ARRAY_SIZE];
};

/* Scratch union sized to the largest per-event record so a single
 * per-CPU heap slot (data_heap below) can back any event type. */
union any_profiler_data_t {
	struct var_exec_data_t var_exec;
	struct var_kill_data_t var_kill;
	struct var_sysctl_data_t var_sysctl;
	struct var_filemod_data_t var_filemod;
	struct var_fork_data_t var_fork;
	struct var_kill_data_arr_t var_kill_data_arr;
};
53*4882a593Smuzhiyun 
/* Runtime configuration, written by userspace before/after attach.
 * volatile forces the compiler to re-read each field on every access
 * instead of constant-folding the zero-initialized values. */
volatile struct profiler_config_struct bpf_config = {};

/* Convenience accessors for the individual config fields. */
#define FETCH_CGROUPS_FROM_BPF (bpf_config.fetch_cgroups_from_bpf)
#define CGROUP_FS_INODE (bpf_config.cgroup_fs_inode)
#define CGROUP_LOGIN_SESSION_INODE \
	(bpf_config.cgroup_login_session_inode)
#define KILL_SIGNALS (bpf_config.kill_signals_mask)
#define STALE_INFO (bpf_config.stale_info_secs)
#define INODE_FILTER (bpf_config.inode_filter)
#define READ_ENVIRON_FROM_EXEC (bpf_config.read_environ_from_exec)
#define ENABLE_CGROUP_V1_RESOLVER (bpf_config.enable_cgroup_v1_resolver)
65*4882a593Smuzhiyun 
/* Local copies of older kernfs layouts; the ___52 suffix (presumably the
 * v5.2-era layout — confirm) makes libbpf treat these as CO-RE "flavors"
 * matched against the running kernel's BTF at load time. */
struct kernfs_iattrs___52 {
	struct iattr ia_iattr;
};

struct kernfs_node___52 {
	union /* kernfs_node_id */ {
		struct {
			u32 ino;
			u32 generation;
		};
		u64 id;
	} id;
};
79*4882a593Smuzhiyun 
/* Per-CPU scratch heap with a single slot large enough for any event
 * record; used in place of the size-limited BPF stack. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, union any_profiler_data_t);
} data_heap SEC(".maps");

/* Perf ring buffer through which completed records reach userspace. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/* Aggregated kill events, keyed by target pid. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, KILL_DATA_ARRAY_SIZE);
	__type(key, u32);
	__type(value, struct var_kill_data_arr_t);
} var_tpid_to_data SEC(".maps");

/* Per-CPU execution counters / elapsed time, one slot per program entry
 * point (enum bpf_function_id). */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, profiler_bpf_max_function_id);
	__type(key, u32);
	__type(value, struct bpf_func_stats_data);
} bpf_func_stats SEC(".maps");

/* The allow/deny maps below are membership sets populated by userspace:
 * the bool value is never read, key presence is what matters. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, bool);
	__uint(max_entries, 16);
} allowed_devices SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64);
	__type(value, bool);
	__uint(max_entries, 1024);
} allowed_file_inodes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u64);
	__type(value, bool);
	__uint(max_entries, 1024);
} allowed_directory_inodes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, bool);
	__uint(max_entries, 16);
} disallowed_exec_inodes SEC(".maps");

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#endif
138*4882a593Smuzhiyun 
IS_ERR(const void * ptr)139*4882a593Smuzhiyun static INLINE bool IS_ERR(const void* ptr)
140*4882a593Smuzhiyun {
141*4882a593Smuzhiyun 	return IS_ERR_VALUE((unsigned long)ptr);
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun 
get_userspace_pid()144*4882a593Smuzhiyun static INLINE u32 get_userspace_pid()
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	return bpf_get_current_pid_tgid() >> 32;
147*4882a593Smuzhiyun }
148*4882a593Smuzhiyun 
is_init_process(u32 tgid)149*4882a593Smuzhiyun static INLINE bool is_init_process(u32 tgid)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	return tgid == 1 || tgid == 0;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun 
/*
 * Bounded kernel-memory copy: read min(@len, @max) bytes from @src into
 * @dst.  Returns the clamped length on success, or 0 if the probe read
 * failed (or len was 0).
 *
 * The separate len > 1 and len == 1 branches look redundant in C but
 * presumably give the BPF verifier distinct, clearly-bounded copy sizes
 * — confirm before collapsing them.
 */
static INLINE unsigned long
probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
{
	len = len < max ? len : max;
	if (len > 1) {
		if (bpf_probe_read(dst, len, src))
			return 0;
	} else if (len == 1) {
		if (bpf_probe_read(dst, 1, src))
			return 0;
	}
	return len;
}
167*4882a593Smuzhiyun 
/*
 * Find the slot in @arr_struct whose recorded sender pid equals @spid.
 * Returns the slot index, or -1 if no slot matches.
 */
static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
				     int spid)
{
/* UNROLL is defined by builds targeting kernels/compilers where the BPF
 * verifier cannot handle bounded loops and needs full unrolling. */
#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
		if (arr_struct->array[i].meta.pid == spid)
			return i;
	return -1;
}
179*4882a593Smuzhiyun 
/*
 * Record up to MAX_ANCESTORS ancestors of @task (pid, self_exec_id and
 * start_time for each), walking real_parent links.  The walk stops early
 * at a NULL parent or on reaching the idle/init process (pid 0 or 1),
 * which is never recorded.
 *
 * NOTE(review): num_ancestors is stored as the current loop index, so
 * after filling slot i the recorded count reads i, not i + 1 — the last
 * slot written is excluded from the count.  This matches the upstream
 * selftest source; confirm consumers expect that convention.
 */
static INLINE void populate_ancestors(struct task_struct* task,
				      struct ancestors_data_t* ancestors_data)
{
	struct task_struct* parent = task;
	u32 num_ancestors, ppid;

	ancestors_data->num_ancestors = 0;
#ifdef UNROLL
#pragma unroll
#endif
	for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
		parent = BPF_CORE_READ(parent, real_parent);
		if (parent == NULL)
			break;
		ppid = BPF_CORE_READ(parent, tgid);
		if (is_init_process(ppid))
			break;
		ancestors_data->ancestor_pids[num_ancestors] = ppid;
		ancestors_data->ancestor_exec_ids[num_ancestors] =
			BPF_CORE_READ(parent, self_exec_id);
		ancestors_data->ancestor_start_times[num_ancestors] =
			BPF_CORE_READ(parent, start_time);
		ancestors_data->num_ancestors = num_ancestors;
	}
}
205*4882a593Smuzhiyun 
/*
 * Append the names of @cgroup_node and its ancestors (leaf first) to
 * @payload as consecutive NUL-terminated strings, walking at most
 * MAX_CGROUPS_PATH_DEPTH components.  When the walk passes
 * @cgroup_root_node, *root_pos is set to the byte offset of that
 * component within the written region.  Returns the advanced payload
 * pointer (one past the last byte written).
 */
static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
					  struct kernfs_node* cgroup_root_node,
					  void* payload,
					  int* root_pos)
{
	void* payload_start = payload;
	size_t filepart_length;

#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
		/* Read before the NULL check: bpf_probe_read_str() simply
		 * fails (returning an error) on a bad source pointer. */
		filepart_length =
			bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
		if (!cgroup_node)
			return payload;
		if (cgroup_node == cgroup_root_node)
			*root_pos = payload - payload_start;
		if (filepart_length <= MAX_PATH) {
			/* barrier_var keeps the bound visible to the BPF
			 * verifier across compiler optimizations. */
			barrier_var(filepart_length);
			payload += filepart_length;
		}
		cgroup_node = BPF_CORE_READ(cgroup_node, parent);
	}
	return payload;
}
232*4882a593Smuzhiyun 
/*
 * Return the inode number of a kernfs node across kernel versions:
 * older kernels (the kernfs_node___52 flavor) keep a u32 ino inside a
 * union, newer kernels expose a single u64 id.  bpf_core_field_exists()
 * resolves which layout the running kernel uses at program load time.
 */
static ino_t get_inode_from_kernfs(struct kernfs_node* node)
{
	struct kernfs_node___52* node52 = (void*)node;

	if (bpf_core_field_exists(node52->id.ino)) {
		barrier_var(node52);
		return BPF_CORE_READ(node52, id.ino);
	} else {
		barrier_var(node);
		return (u64)BPF_CORE_READ(node, id);
	}
}
245*4882a593Smuzhiyun 
/* Resolved from the running kernel's config at load time; __weak so the
 * program still loads when the option is absent. */
extern bool CONFIG_CGROUP_PIDS __kconfig __weak;
/* Local enum flavor: the real pids_cgrp_id value is relocated from the
 * target kernel's BTF via bpf_core_enum_value(). */
enum cgroup_subsys_id___local {
	pids_cgrp_id___local = 123, /* value doesn't matter */
};
250*4882a593Smuzhiyun 
/*
 * Fill @cgroup_data with the cgroup identity of @task — root and proc
 * kernfs inode numbers and mtimes, plus the root/proc names (and
 * optionally the full cgroup path) appended to @payload.  Returns the
 * advanced payload pointer.
 *
 * By default the v2 (default hierarchy) cgroup is used; when
 * ENABLE_CGROUP_V1_RESOLVER is set and CONFIG_CGROUP_PIDS is enabled,
 * the pids-controller v1 hierarchy is used instead.
 */
static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
					 struct task_struct* task,
					 void* payload)
{
	struct kernfs_node* root_kernfs =
		BPF_CORE_READ(task, nsproxy, cgroup_ns, root_cset, dfl_cgrp, kn);
	struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);

/* Enum-value relocation needs compiler support for CO-RE builtins. */
#if __has_builtin(__builtin_preserve_enum_value)
	if (ENABLE_CGROUP_V1_RESOLVER && CONFIG_CGROUP_PIDS) {
		/* Resolve the pids controller id from the target kernel. */
		int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local,
						  pids_cgrp_id___local);
#ifdef UNROLL
#pragma unroll
#endif
		for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys_state* subsys =
				BPF_CORE_READ(task, cgroups, subsys[i]);
			if (subsys != NULL) {
				int subsys_id = BPF_CORE_READ(subsys, ss, id);
				if (subsys_id == cgrp_id) {
					proc_kernfs = BPF_CORE_READ(subsys, cgroup, kn);
					root_kernfs = BPF_CORE_READ(subsys, ss, root, kf_root, kn);
					break;
				}
			}
		}
	}
#endif

	cgroup_data->cgroup_root_inode = get_inode_from_kernfs(root_kernfs);
	cgroup_data->cgroup_proc_inode = get_inode_from_kernfs(proc_kernfs);

	/* Newer kernels embed ia_mtime directly in kernfs_iattrs; older
	 * (___52 flavor) kernels nest it inside a struct iattr. */
	if (bpf_core_field_exists(root_kernfs->iattr->ia_mtime)) {
		cgroup_data->cgroup_root_mtime =
			BPF_CORE_READ(root_kernfs, iattr, ia_mtime.tv_nsec);
		cgroup_data->cgroup_proc_mtime =
			BPF_CORE_READ(proc_kernfs, iattr, ia_mtime.tv_nsec);
	} else {
		struct kernfs_iattrs___52* root_iattr =
			(struct kernfs_iattrs___52*)BPF_CORE_READ(root_kernfs, iattr);
		cgroup_data->cgroup_root_mtime =
			BPF_CORE_READ(root_iattr, ia_iattr.ia_mtime.tv_nsec);

		struct kernfs_iattrs___52* proc_iattr =
			(struct kernfs_iattrs___52*)BPF_CORE_READ(proc_kernfs, iattr);
		cgroup_data->cgroup_proc_mtime =
			BPF_CORE_READ(proc_iattr, ia_iattr.ia_mtime.tv_nsec);
	}

	cgroup_data->cgroup_root_length = 0;
	cgroup_data->cgroup_proc_length = 0;
	cgroup_data->cgroup_full_length = 0;

	/* Root hierarchy name, appended to the payload area. */
	size_t cgroup_root_length =
		bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
	barrier_var(cgroup_root_length);
	if (cgroup_root_length <= MAX_PATH) {
		barrier_var(cgroup_root_length);
		cgroup_data->cgroup_root_length = cgroup_root_length;
		payload += cgroup_root_length;
	}

	/* Leaf cgroup name of the task's own cgroup. */
	size_t cgroup_proc_length =
		bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
	barrier_var(cgroup_proc_length);
	if (cgroup_proc_length <= MAX_PATH) {
		barrier_var(cgroup_proc_length);
		cgroup_data->cgroup_proc_length = cgroup_proc_length;
		payload += cgroup_proc_length;
	}

	/* Optionally resolve the entire cgroup path in-kernel. */
	if (FETCH_CGROUPS_FROM_BPF) {
		cgroup_data->cgroup_full_path_root_pos = -1;
		void* payload_end_pos = read_full_cgroup_path(proc_kernfs, root_kernfs, payload,
							      &cgroup_data->cgroup_full_path_root_pos);
		cgroup_data->cgroup_full_length = payload_end_pos - payload;
		payload = payload_end_pos;
	}

	return (void*)payload;
}
333*4882a593Smuzhiyun 
/*
 * Fill the common per-event metadata (uid/gid, pid, exec id, start time)
 * for @task and append its comm string to @payload.  Returns the
 * advanced payload pointer.
 */
static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
					  struct task_struct* task,
					  u32 pid, void* payload)
{
	u64 uid_gid = bpf_get_current_uid_gid();

	metadata->uid = (u32)uid_gid; /* uid in low 32 bits */
	metadata->gid = uid_gid >> 32; /* gid in high 32 bits */
	metadata->pid = pid;
	metadata->exec_id = BPF_CORE_READ(task, self_exec_id);
	metadata->start_time = BPF_CORE_READ(task, start_time);
	metadata->comm_length = 0;

	size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
	/* barrier_var keeps the bound check visible to the BPF verifier. */
	barrier_var(comm_length);
	if (comm_length <= TASK_COMM_LEN) {
		barrier_var(comm_length);
		metadata->comm_length = comm_length;
		payload += comm_length;
	}

	return (void*)payload;
}
357*4882a593Smuzhiyun 
/*
 * Build a fully-initialized kill-event record in the per-CPU data_heap
 * slot for sender @spid -> target @tpid with signal @sig.  Returns NULL
 * if the heap lookup fails.  Note the returned pointer aliases the
 * shared per-CPU heap entry, so it is only valid until the next heap
 * user on this CPU.
 */
static INLINE struct var_kill_data_t*
get_var_kill_data(struct pt_regs* ctx, int spid, int tpid, int sig)
{
	int zero = 0;
	struct var_kill_data_t* kill_data = bpf_map_lookup_elem(&data_heap, &zero);

	if (kill_data == NULL)
		return NULL;
	struct task_struct* task = (struct task_struct*)bpf_get_current_task();

	/* Common metadata, cgroup info and ancestry share the record's
	 * variable-length payload area. */
	void* payload = populate_var_metadata(&kill_data->meta, task, spid, kill_data->payload);
	payload = populate_cgroup_info(&kill_data->cgroup_data, task, payload);
	size_t payload_length = payload - (void*)kill_data->payload;
	kill_data->payload_length = payload_length;
	populate_ancestors(task, &kill_data->ancestors_info);
	kill_data->meta.type = KILL_EVENT;
	kill_data->kill_target_pid = tpid;
	kill_data->kill_sig = sig;
	kill_data->kill_count = 1;
	kill_data->last_kill_time = bpf_ktime_get_ns();
	return kill_data;
}
380*4882a593Smuzhiyun 
/*
 * Record/aggregate a kill() of @tpid with signal @sig by the current
 * task.  Repeated kills from the same sender within STALE_INFO seconds
 * are coalesced by bumping kill_count rather than creating a new record.
 * The per-target state lives in the var_tpid_to_data map.
 */
static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
{
	/* Ignore signals not selected in the configured signal mask. */
	if ((KILL_SIGNALS & (1ULL << sig)) == 0)
		return 0;

	u32 spid = get_userspace_pid();
	struct var_kill_data_arr_t* arr_struct = bpf_map_lookup_elem(&var_tpid_to_data, &tpid);

	if (arr_struct == NULL) {
		/* First kill seen for this target: build a record and seed
		 * slot 0 of a fresh per-target array. */
		struct var_kill_data_t* kill_data = get_var_kill_data(ctx, spid, tpid, sig);
		int zero = 0;

		if (kill_data == NULL)
			return 0;
		/* NOTE(review): kill_data and arr_struct both come from slot
		 * zero of data_heap, so they alias the same per-CPU memory
		 * and the copy below overlaps itself.  This matches the
		 * upstream selftest — verify it is intentional. */
		arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
		if (arr_struct == NULL)
			return 0;
		bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
	} else {
		int index = get_var_spid_index(arr_struct, spid);

		if (index == -1) {
			/* Known target but new sender: claim the first free
			 * (meta.pid == 0) slot, if any remain. */
			struct var_kill_data_t* kill_data =
				get_var_kill_data(ctx, spid, tpid, sig);
			if (kill_data == NULL)
				return 0;
#ifdef UNROLL
#pragma unroll
#endif
			for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
				if (arr_struct->array[i].meta.pid == 0) {
					bpf_probe_read(&arr_struct->array[i],
						       sizeof(arr_struct->array[i]), kill_data);
					bpf_map_update_elem(&var_tpid_to_data, &tpid,
							    arr_struct, 0);

					return 0;
				}
			return 0;
		}

		struct var_kill_data_t* kill_data = &arr_struct->array[index];

		u64 delta_sec =
			(bpf_ktime_get_ns() - kill_data->last_kill_time) / 1000000000;

		if (delta_sec < STALE_INFO) {
			/* Recent duplicate: coalesce into the existing slot. */
			kill_data->kill_count++;
			kill_data->last_kill_time = bpf_ktime_get_ns();
			bpf_probe_read(&arr_struct->array[index],
				       sizeof(arr_struct->array[index]),
				       kill_data);
		} else {
			/* Slot is stale: rebuild a fresh record in place. */
			struct var_kill_data_t* kill_data =
				get_var_kill_data(ctx, spid, tpid, sig);
			if (kill_data == NULL)
				return 0;
			bpf_probe_read(&arr_struct->array[index],
				       sizeof(arr_struct->array[index]),
				       kill_data);
		}
	}
	bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
	return 0;
}
446*4882a593Smuzhiyun 
bpf_stats_enter(struct bpf_func_stats_ctx * bpf_stat_ctx,enum bpf_function_id func_id)447*4882a593Smuzhiyun static INLINE void bpf_stats_enter(struct bpf_func_stats_ctx* bpf_stat_ctx,
448*4882a593Smuzhiyun 				   enum bpf_function_id func_id)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun 	int func_id_key = func_id;
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	bpf_stat_ctx->start_time_ns = bpf_ktime_get_ns();
453*4882a593Smuzhiyun 	bpf_stat_ctx->bpf_func_stats_data_val =
454*4882a593Smuzhiyun 		bpf_map_lookup_elem(&bpf_func_stats, &func_id_key);
455*4882a593Smuzhiyun 	if (bpf_stat_ctx->bpf_func_stats_data_val)
456*4882a593Smuzhiyun 		bpf_stat_ctx->bpf_func_stats_data_val->num_executions++;
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun 
bpf_stats_exit(struct bpf_func_stats_ctx * bpf_stat_ctx)459*4882a593Smuzhiyun static INLINE void bpf_stats_exit(struct bpf_func_stats_ctx* bpf_stat_ctx)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun 	if (bpf_stat_ctx->bpf_func_stats_data_val)
462*4882a593Smuzhiyun 		bpf_stat_ctx->bpf_func_stats_data_val->time_elapsed_ns +=
463*4882a593Smuzhiyun 			bpf_ktime_get_ns() - bpf_stat_ctx->start_time_ns;
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun static INLINE void
bpf_stats_pre_submit_var_perf_event(struct bpf_func_stats_ctx * bpf_stat_ctx,struct var_metadata_t * meta)467*4882a593Smuzhiyun bpf_stats_pre_submit_var_perf_event(struct bpf_func_stats_ctx* bpf_stat_ctx,
468*4882a593Smuzhiyun 				    struct var_metadata_t* meta)
469*4882a593Smuzhiyun {
470*4882a593Smuzhiyun 	if (bpf_stat_ctx->bpf_func_stats_data_val) {
471*4882a593Smuzhiyun 		bpf_stat_ctx->bpf_func_stats_data_val->num_perf_events++;
472*4882a593Smuzhiyun 		meta->bpf_stats_num_perf_events =
473*4882a593Smuzhiyun 			bpf_stat_ctx->bpf_func_stats_data_val->num_perf_events;
474*4882a593Smuzhiyun 	}
475*4882a593Smuzhiyun 	meta->bpf_stats_start_ktime_ns = bpf_stat_ctx->start_time_ns;
476*4882a593Smuzhiyun 	meta->cpu_id = bpf_get_smp_processor_id();
477*4882a593Smuzhiyun }
478*4882a593Smuzhiyun 
/*
 * Write the name of @filp_dentry and each of its parents (leaf first)
 * into @payload as consecutive NUL-terminated strings, stopping at the
 * filesystem root (a dentry that is its own parent) or after
 * MAX_PATH_DEPTH components.  Returns the total number of bytes written.
 */
static INLINE size_t
read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
{
	size_t length = 0;
	size_t filepart_length;
	struct dentry* parent_dentry;

#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < MAX_PATH_DEPTH; i++) {
		filepart_length = bpf_probe_read_str(payload, MAX_PATH,
						     BPF_CORE_READ(filp_dentry, d_name.name));
		/* barrier_var keeps the bound visible to the BPF verifier. */
		barrier_var(filepart_length);
		if (filepart_length > MAX_PATH)
			break;
		barrier_var(filepart_length);
		payload += filepart_length;
		length += filepart_length;

		parent_dentry = BPF_CORE_READ(filp_dentry, d_parent);
		if (filp_dentry == parent_dentry)
			break;
		filp_dentry = parent_dentry;
	}

	return length;
}
507*4882a593Smuzhiyun 
/*
 * Walk @filp_dentry and its parents (up to MAX_PATH_DEPTH levels);
 * return true if any directory's inode number is present in the
 * allowed_directory_inodes map.
 */
static INLINE bool
is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
{
	struct dentry* parent_dentry;
#ifdef UNROLL
#pragma unroll
#endif
	for (int i = 0; i < MAX_PATH_DEPTH; i++) {
		u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
		/* Membership test: the value itself is never used. */
		bool* allowed_dir = bpf_map_lookup_elem(&allowed_directory_inodes, &dir_ino);

		if (allowed_dir != NULL)
			return true;
		parent_dentry = BPF_CORE_READ(filp_dentry, d_parent);
		if (filp_dentry == parent_dentry)
			break;
		filp_dentry = parent_dentry;
	}
	return false;
}
528*4882a593Smuzhiyun 
/*
 * Decide whether file-modification events on @file_dentry should be
 * reported.  Allowed when the device is allow-listed AND either the
 * file's inode or some ancestor directory's inode is allow-listed.
 *
 * Side effects: *device_id is always written; *file_ino is written only
 * after the device check passes.
 */
static INLINE bool is_dentry_allowed_for_filemod(struct dentry* file_dentry,
						 u32* device_id,
						 u64* file_ino)
{
	u32 dev_id = BPF_CORE_READ(file_dentry, d_sb, s_dev);
	*device_id = dev_id;
	bool* allowed_device = bpf_map_lookup_elem(&allowed_devices, &dev_id);

	if (allowed_device == NULL)
		return false;

	u64 ino = BPF_CORE_READ(file_dentry, d_inode, i_ino);
	*file_ino = ino;
	bool* allowed_file = bpf_map_lookup_elem(&allowed_file_inodes, &ino);

	if (allowed_file == NULL)
		if (!is_ancestor_in_allowed_inodes(BPF_CORE_READ(file_dentry, d_parent)))
			return false;
	return true;
}
549*4882a593Smuzhiyun 
/*
 * kprobe on proc_sys_write(): emit a SYSCTL_EVENT perf record capturing
 * the writing task's identity/cgroups/ancestry, the value being written
 * (from @buf) and the sysctl file's leaf name.
 */
SEC("kprobe/proc_sys_write")
ssize_t BPF_KPROBE(kprobe__proc_sys_write,
		   struct file* filp, const char* buf,
		   size_t count, loff_t* ppos)
{
	struct bpf_func_stats_ctx stats_ctx;
	bpf_stats_enter(&stats_ctx, profiler_bpf_proc_sys_write);

	u32 pid = get_userspace_pid();
	int zero = 0;
	/* Per-CPU scratch record; too large for the BPF stack. */
	struct var_sysctl_data_t* sysctl_data =
		bpf_map_lookup_elem(&data_heap, &zero);
	if (!sysctl_data)
		goto out;

	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
	sysctl_data->meta.type = SYSCTL_EVENT;
	void* payload = populate_var_metadata(&sysctl_data->meta, task, pid, sysctl_data->payload);
	payload = populate_cgroup_info(&sysctl_data->cgroup_data, task, payload);

	populate_ancestors(task, &sysctl_data->ancestors_info);

	sysctl_data->sysctl_val_length = 0;
	sysctl_data->sysctl_path_length = 0;

	/* The value userspace is writing, capped at CTL_MAXNAME bytes. */
	size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
	barrier_var(sysctl_val_length);
	if (sysctl_val_length <= CTL_MAXNAME) {
		barrier_var(sysctl_val_length);
		sysctl_data->sysctl_val_length = sysctl_val_length;
		payload += sysctl_val_length;
	}

	/* Leaf name of the sysctl file (not the full /proc/sys path). */
	size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
						       BPF_CORE_READ(filp, f_path.dentry, d_name.name));
	barrier_var(sysctl_path_length);
	if (sysctl_path_length <= MAX_PATH) {
		barrier_var(sysctl_path_length);
		sysctl_data->sysctl_path_length = sysctl_path_length;
		payload += sysctl_path_length;
	}

	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &sysctl_data->meta);
	/* Clamp the submitted length to the record size. */
	unsigned long data_len = payload - (void*)sysctl_data;
	data_len = data_len > sizeof(struct var_sysctl_data_t)
		? sizeof(struct var_sysctl_data_t)
		: data_len;
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, sysctl_data, data_len);
out:
	bpf_stats_exit(&stats_ctx);
	return 0;
}
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun SEC("tracepoint/syscalls/sys_enter_kill")
tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter * ctx)604*4882a593Smuzhiyun int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx)
605*4882a593Smuzhiyun {
606*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
607*4882a593Smuzhiyun 
608*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_sys_enter_kill);
609*4882a593Smuzhiyun 	int pid = ctx->args[0];
610*4882a593Smuzhiyun 	int sig = ctx->args[1];
611*4882a593Smuzhiyun 	int ret = trace_var_sys_kill(ctx, pid, sig);
612*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
613*4882a593Smuzhiyun 	return ret;
614*4882a593Smuzhiyun };
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun SEC("raw_tracepoint/sched_process_exit")
raw_tracepoint__sched_process_exit(void * ctx)617*4882a593Smuzhiyun int raw_tracepoint__sched_process_exit(void* ctx)
618*4882a593Smuzhiyun {
619*4882a593Smuzhiyun 	int zero = 0;
620*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
621*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_exit);
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 	u32 tpid = get_userspace_pid();
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	struct var_kill_data_arr_t* arr_struct = bpf_map_lookup_elem(&var_tpid_to_data, &tpid);
626*4882a593Smuzhiyun 	struct var_kill_data_t* kill_data = bpf_map_lookup_elem(&data_heap, &zero);
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun 	if (arr_struct == NULL || kill_data == NULL)
629*4882a593Smuzhiyun 		goto out;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
632*4882a593Smuzhiyun 	struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun #ifdef UNROLL
635*4882a593Smuzhiyun #pragma unroll
636*4882a593Smuzhiyun #endif
637*4882a593Smuzhiyun 	for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
638*4882a593Smuzhiyun 		struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 		if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
641*4882a593Smuzhiyun 			bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
642*4882a593Smuzhiyun 			void* payload = kill_data->payload;
643*4882a593Smuzhiyun 			size_t offset = kill_data->payload_length;
644*4882a593Smuzhiyun 			if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
645*4882a593Smuzhiyun 				return 0;
646*4882a593Smuzhiyun 			payload += offset;
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 			kill_data->kill_target_name_length = 0;
649*4882a593Smuzhiyun 			kill_data->kill_target_cgroup_proc_length = 0;
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 			size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
652*4882a593Smuzhiyun 			barrier_var(comm_length);
653*4882a593Smuzhiyun 			if (comm_length <= TASK_COMM_LEN) {
654*4882a593Smuzhiyun 				barrier_var(comm_length);
655*4882a593Smuzhiyun 				kill_data->kill_target_name_length = comm_length;
656*4882a593Smuzhiyun 				payload += comm_length;
657*4882a593Smuzhiyun 			}
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 			size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
660*4882a593Smuzhiyun 								       BPF_CORE_READ(proc_kernfs, name));
661*4882a593Smuzhiyun 			barrier_var(cgroup_proc_length);
662*4882a593Smuzhiyun 			if (cgroup_proc_length <= KILL_TARGET_LEN) {
663*4882a593Smuzhiyun 				barrier_var(cgroup_proc_length);
664*4882a593Smuzhiyun 				kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
665*4882a593Smuzhiyun 				payload += cgroup_proc_length;
666*4882a593Smuzhiyun 			}
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 			bpf_stats_pre_submit_var_perf_event(&stats_ctx, &kill_data->meta);
669*4882a593Smuzhiyun 			unsigned long data_len = (void*)payload - (void*)kill_data;
670*4882a593Smuzhiyun 			data_len = data_len > sizeof(struct var_kill_data_t)
671*4882a593Smuzhiyun 				? sizeof(struct var_kill_data_t)
672*4882a593Smuzhiyun 				: data_len;
673*4882a593Smuzhiyun 			bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, kill_data, data_len);
674*4882a593Smuzhiyun 		}
675*4882a593Smuzhiyun 	}
676*4882a593Smuzhiyun 	bpf_map_delete_elem(&var_tpid_to_data, &tpid);
677*4882a593Smuzhiyun out:
678*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
679*4882a593Smuzhiyun 	return 0;
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun SEC("raw_tracepoint/sched_process_exec")
raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args * ctx)683*4882a593Smuzhiyun int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
684*4882a593Smuzhiyun {
685*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
686*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_exec);
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 	struct linux_binprm* bprm = (struct linux_binprm*)ctx->args[2];
689*4882a593Smuzhiyun 	u64 inode = BPF_CORE_READ(bprm, file, f_inode, i_ino);
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 	bool* should_filter_binprm = bpf_map_lookup_elem(&disallowed_exec_inodes, &inode);
692*4882a593Smuzhiyun 	if (should_filter_binprm != NULL)
693*4882a593Smuzhiyun 		goto out;
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	int zero = 0;
696*4882a593Smuzhiyun 	struct var_exec_data_t* proc_exec_data = bpf_map_lookup_elem(&data_heap, &zero);
697*4882a593Smuzhiyun 	if (!proc_exec_data)
698*4882a593Smuzhiyun 		goto out;
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	if (INODE_FILTER && inode != INODE_FILTER)
701*4882a593Smuzhiyun 		return 0;
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	u32 pid = get_userspace_pid();
704*4882a593Smuzhiyun 	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	proc_exec_data->meta.type = EXEC_EVENT;
707*4882a593Smuzhiyun 	proc_exec_data->bin_path_length = 0;
708*4882a593Smuzhiyun 	proc_exec_data->cmdline_length = 0;
709*4882a593Smuzhiyun 	proc_exec_data->environment_length = 0;
710*4882a593Smuzhiyun 	void* payload = populate_var_metadata(&proc_exec_data->meta, task, pid,
711*4882a593Smuzhiyun 					      proc_exec_data->payload);
712*4882a593Smuzhiyun 	payload = populate_cgroup_info(&proc_exec_data->cgroup_data, task, payload);
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	struct task_struct* parent_task = BPF_CORE_READ(task, real_parent);
715*4882a593Smuzhiyun 	proc_exec_data->parent_pid = BPF_CORE_READ(parent_task, tgid);
716*4882a593Smuzhiyun 	proc_exec_data->parent_uid = BPF_CORE_READ(parent_task, real_cred, uid.val);
717*4882a593Smuzhiyun 	proc_exec_data->parent_exec_id = BPF_CORE_READ(parent_task, self_exec_id);
718*4882a593Smuzhiyun 	proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 	const char* filename = BPF_CORE_READ(bprm, filename);
721*4882a593Smuzhiyun 	size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
722*4882a593Smuzhiyun 	barrier_var(bin_path_length);
723*4882a593Smuzhiyun 	if (bin_path_length <= MAX_FILENAME_LEN) {
724*4882a593Smuzhiyun 		barrier_var(bin_path_length);
725*4882a593Smuzhiyun 		proc_exec_data->bin_path_length = bin_path_length;
726*4882a593Smuzhiyun 		payload += bin_path_length;
727*4882a593Smuzhiyun 	}
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	void* arg_start = (void*)BPF_CORE_READ(task, mm, arg_start);
730*4882a593Smuzhiyun 	void* arg_end = (void*)BPF_CORE_READ(task, mm, arg_end);
731*4882a593Smuzhiyun 	unsigned int cmdline_length = probe_read_lim(payload, arg_start,
732*4882a593Smuzhiyun 						     arg_end - arg_start, MAX_ARGS_LEN);
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	if (cmdline_length <= MAX_ARGS_LEN) {
735*4882a593Smuzhiyun 		barrier_var(cmdline_length);
736*4882a593Smuzhiyun 		proc_exec_data->cmdline_length = cmdline_length;
737*4882a593Smuzhiyun 		payload += cmdline_length;
738*4882a593Smuzhiyun 	}
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	if (READ_ENVIRON_FROM_EXEC) {
741*4882a593Smuzhiyun 		void* env_start = (void*)BPF_CORE_READ(task, mm, env_start);
742*4882a593Smuzhiyun 		void* env_end = (void*)BPF_CORE_READ(task, mm, env_end);
743*4882a593Smuzhiyun 		unsigned long env_len = probe_read_lim(payload, env_start,
744*4882a593Smuzhiyun 						       env_end - env_start, MAX_ENVIRON_LEN);
745*4882a593Smuzhiyun 		if (cmdline_length <= MAX_ENVIRON_LEN) {
746*4882a593Smuzhiyun 			proc_exec_data->environment_length = env_len;
747*4882a593Smuzhiyun 			payload += env_len;
748*4882a593Smuzhiyun 		}
749*4882a593Smuzhiyun 	}
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &proc_exec_data->meta);
752*4882a593Smuzhiyun 	unsigned long data_len = payload - (void*)proc_exec_data;
753*4882a593Smuzhiyun 	data_len = data_len > sizeof(struct var_exec_data_t)
754*4882a593Smuzhiyun 		? sizeof(struct var_exec_data_t)
755*4882a593Smuzhiyun 		: data_len;
756*4882a593Smuzhiyun 	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, proc_exec_data, data_len);
757*4882a593Smuzhiyun out:
758*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
759*4882a593Smuzhiyun 	return 0;
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun SEC("kretprobe/do_filp_open")
kprobe_ret__do_filp_open(struct pt_regs * ctx)763*4882a593Smuzhiyun int kprobe_ret__do_filp_open(struct pt_regs* ctx)
764*4882a593Smuzhiyun {
765*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
766*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_do_filp_open_ret);
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	struct file* filp = (struct file*)PT_REGS_RC_CORE(ctx);
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	if (filp == NULL || IS_ERR(filp))
771*4882a593Smuzhiyun 		goto out;
772*4882a593Smuzhiyun 	unsigned int flags = BPF_CORE_READ(filp, f_flags);
773*4882a593Smuzhiyun 	if ((flags & (O_RDWR | O_WRONLY)) == 0)
774*4882a593Smuzhiyun 		goto out;
775*4882a593Smuzhiyun 	if ((flags & O_TMPFILE) > 0)
776*4882a593Smuzhiyun 		goto out;
777*4882a593Smuzhiyun 	struct inode* file_inode = BPF_CORE_READ(filp, f_inode);
778*4882a593Smuzhiyun 	umode_t mode = BPF_CORE_READ(file_inode, i_mode);
779*4882a593Smuzhiyun 	if (S_ISDIR(mode) || S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
780*4882a593Smuzhiyun 	    S_ISSOCK(mode))
781*4882a593Smuzhiyun 		goto out;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	struct dentry* filp_dentry = BPF_CORE_READ(filp, f_path.dentry);
784*4882a593Smuzhiyun 	u32 device_id = 0;
785*4882a593Smuzhiyun 	u64 file_ino = 0;
786*4882a593Smuzhiyun 	if (!is_dentry_allowed_for_filemod(filp_dentry, &device_id, &file_ino))
787*4882a593Smuzhiyun 		goto out;
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun 	int zero = 0;
790*4882a593Smuzhiyun 	struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
791*4882a593Smuzhiyun 	if (!filemod_data)
792*4882a593Smuzhiyun 		goto out;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	u32 pid = get_userspace_pid();
795*4882a593Smuzhiyun 	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	filemod_data->meta.type = FILEMOD_EVENT;
798*4882a593Smuzhiyun 	filemod_data->fmod_type = FMOD_OPEN;
799*4882a593Smuzhiyun 	filemod_data->dst_flags = flags;
800*4882a593Smuzhiyun 	filemod_data->src_inode = 0;
801*4882a593Smuzhiyun 	filemod_data->dst_inode = file_ino;
802*4882a593Smuzhiyun 	filemod_data->src_device_id = 0;
803*4882a593Smuzhiyun 	filemod_data->dst_device_id = device_id;
804*4882a593Smuzhiyun 	filemod_data->src_filepath_length = 0;
805*4882a593Smuzhiyun 	filemod_data->dst_filepath_length = 0;
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
808*4882a593Smuzhiyun 					      filemod_data->payload);
809*4882a593Smuzhiyun 	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
812*4882a593Smuzhiyun 	barrier_var(len);
813*4882a593Smuzhiyun 	if (len <= MAX_FILEPATH_LENGTH) {
814*4882a593Smuzhiyun 		barrier_var(len);
815*4882a593Smuzhiyun 		payload += len;
816*4882a593Smuzhiyun 		filemod_data->dst_filepath_length = len;
817*4882a593Smuzhiyun 	}
818*4882a593Smuzhiyun 	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
819*4882a593Smuzhiyun 	unsigned long data_len = payload - (void*)filemod_data;
820*4882a593Smuzhiyun 	data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
821*4882a593Smuzhiyun 	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
822*4882a593Smuzhiyun out:
823*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
824*4882a593Smuzhiyun 	return 0;
825*4882a593Smuzhiyun }
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun SEC("kprobe/vfs_link")
BPF_KPROBE(kprobe__vfs_link,struct dentry * old_dentry,struct inode * dir,struct dentry * new_dentry,struct inode ** delegated_inode)828*4882a593Smuzhiyun int BPF_KPROBE(kprobe__vfs_link,
829*4882a593Smuzhiyun 	       struct dentry* old_dentry, struct inode* dir,
830*4882a593Smuzhiyun 	       struct dentry* new_dentry, struct inode** delegated_inode)
831*4882a593Smuzhiyun {
832*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
833*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	u32 src_device_id = 0;
836*4882a593Smuzhiyun 	u64 src_file_ino = 0;
837*4882a593Smuzhiyun 	u32 dst_device_id = 0;
838*4882a593Smuzhiyun 	u64 dst_file_ino = 0;
839*4882a593Smuzhiyun 	if (!is_dentry_allowed_for_filemod(old_dentry, &src_device_id, &src_file_ino) &&
840*4882a593Smuzhiyun 	    !is_dentry_allowed_for_filemod(new_dentry, &dst_device_id, &dst_file_ino))
841*4882a593Smuzhiyun 		goto out;
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun 	int zero = 0;
844*4882a593Smuzhiyun 	struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
845*4882a593Smuzhiyun 	if (!filemod_data)
846*4882a593Smuzhiyun 		goto out;
847*4882a593Smuzhiyun 
848*4882a593Smuzhiyun 	u32 pid = get_userspace_pid();
849*4882a593Smuzhiyun 	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
850*4882a593Smuzhiyun 
851*4882a593Smuzhiyun 	filemod_data->meta.type = FILEMOD_EVENT;
852*4882a593Smuzhiyun 	filemod_data->fmod_type = FMOD_LINK;
853*4882a593Smuzhiyun 	filemod_data->dst_flags = 0;
854*4882a593Smuzhiyun 	filemod_data->src_inode = src_file_ino;
855*4882a593Smuzhiyun 	filemod_data->dst_inode = dst_file_ino;
856*4882a593Smuzhiyun 	filemod_data->src_device_id = src_device_id;
857*4882a593Smuzhiyun 	filemod_data->dst_device_id = dst_device_id;
858*4882a593Smuzhiyun 	filemod_data->src_filepath_length = 0;
859*4882a593Smuzhiyun 	filemod_data->dst_filepath_length = 0;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
862*4882a593Smuzhiyun 					      filemod_data->payload);
863*4882a593Smuzhiyun 	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 	size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
866*4882a593Smuzhiyun 	barrier_var(len);
867*4882a593Smuzhiyun 	if (len <= MAX_FILEPATH_LENGTH) {
868*4882a593Smuzhiyun 		barrier_var(len);
869*4882a593Smuzhiyun 		payload += len;
870*4882a593Smuzhiyun 		filemod_data->src_filepath_length = len;
871*4882a593Smuzhiyun 	}
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun 	len = read_absolute_file_path_from_dentry(new_dentry, payload);
874*4882a593Smuzhiyun 	barrier_var(len);
875*4882a593Smuzhiyun 	if (len <= MAX_FILEPATH_LENGTH) {
876*4882a593Smuzhiyun 		barrier_var(len);
877*4882a593Smuzhiyun 		payload += len;
878*4882a593Smuzhiyun 		filemod_data->dst_filepath_length = len;
879*4882a593Smuzhiyun 	}
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
882*4882a593Smuzhiyun 	unsigned long data_len = payload - (void*)filemod_data;
883*4882a593Smuzhiyun 	data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
884*4882a593Smuzhiyun 	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
885*4882a593Smuzhiyun out:
886*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
887*4882a593Smuzhiyun 	return 0;
888*4882a593Smuzhiyun }
889*4882a593Smuzhiyun 
890*4882a593Smuzhiyun SEC("kprobe/vfs_symlink")
BPF_KPROBE(kprobe__vfs_symlink,struct inode * dir,struct dentry * dentry,const char * oldname)891*4882a593Smuzhiyun int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
892*4882a593Smuzhiyun 	       const char* oldname)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
895*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_symlink);
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 	u32 dst_device_id = 0;
898*4882a593Smuzhiyun 	u64 dst_file_ino = 0;
899*4882a593Smuzhiyun 	if (!is_dentry_allowed_for_filemod(dentry, &dst_device_id, &dst_file_ino))
900*4882a593Smuzhiyun 		goto out;
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	int zero = 0;
903*4882a593Smuzhiyun 	struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
904*4882a593Smuzhiyun 	if (!filemod_data)
905*4882a593Smuzhiyun 		goto out;
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun 	u32 pid = get_userspace_pid();
908*4882a593Smuzhiyun 	struct task_struct* task = (struct task_struct*)bpf_get_current_task();
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	filemod_data->meta.type = FILEMOD_EVENT;
911*4882a593Smuzhiyun 	filemod_data->fmod_type = FMOD_SYMLINK;
912*4882a593Smuzhiyun 	filemod_data->dst_flags = 0;
913*4882a593Smuzhiyun 	filemod_data->src_inode = 0;
914*4882a593Smuzhiyun 	filemod_data->dst_inode = dst_file_ino;
915*4882a593Smuzhiyun 	filemod_data->src_device_id = 0;
916*4882a593Smuzhiyun 	filemod_data->dst_device_id = dst_device_id;
917*4882a593Smuzhiyun 	filemod_data->src_filepath_length = 0;
918*4882a593Smuzhiyun 	filemod_data->dst_filepath_length = 0;
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun 	void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
921*4882a593Smuzhiyun 					      filemod_data->payload);
922*4882a593Smuzhiyun 	payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
925*4882a593Smuzhiyun 	barrier_var(len);
926*4882a593Smuzhiyun 	if (len <= MAX_FILEPATH_LENGTH) {
927*4882a593Smuzhiyun 		barrier_var(len);
928*4882a593Smuzhiyun 		payload += len;
929*4882a593Smuzhiyun 		filemod_data->src_filepath_length = len;
930*4882a593Smuzhiyun 	}
931*4882a593Smuzhiyun 	len = read_absolute_file_path_from_dentry(dentry, payload);
932*4882a593Smuzhiyun 	barrier_var(len);
933*4882a593Smuzhiyun 	if (len <= MAX_FILEPATH_LENGTH) {
934*4882a593Smuzhiyun 		barrier_var(len);
935*4882a593Smuzhiyun 		payload += len;
936*4882a593Smuzhiyun 		filemod_data->dst_filepath_length = len;
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
939*4882a593Smuzhiyun 	unsigned long data_len = payload - (void*)filemod_data;
940*4882a593Smuzhiyun 	data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
941*4882a593Smuzhiyun 	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
942*4882a593Smuzhiyun out:
943*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
944*4882a593Smuzhiyun 	return 0;
945*4882a593Smuzhiyun }
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun SEC("raw_tracepoint/sched_process_fork")
raw_tracepoint__sched_process_fork(struct bpf_raw_tracepoint_args * ctx)948*4882a593Smuzhiyun int raw_tracepoint__sched_process_fork(struct bpf_raw_tracepoint_args* ctx)
949*4882a593Smuzhiyun {
950*4882a593Smuzhiyun 	struct bpf_func_stats_ctx stats_ctx;
951*4882a593Smuzhiyun 	bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_fork);
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	int zero = 0;
954*4882a593Smuzhiyun 	struct var_fork_data_t* fork_data = bpf_map_lookup_elem(&data_heap, &zero);
955*4882a593Smuzhiyun 	if (!fork_data)
956*4882a593Smuzhiyun 		goto out;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	struct task_struct* parent = (struct task_struct*)ctx->args[0];
959*4882a593Smuzhiyun 	struct task_struct* child = (struct task_struct*)ctx->args[1];
960*4882a593Smuzhiyun 	fork_data->meta.type = FORK_EVENT;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	void* payload = populate_var_metadata(&fork_data->meta, child,
963*4882a593Smuzhiyun 					      BPF_CORE_READ(child, pid), fork_data->payload);
964*4882a593Smuzhiyun 	fork_data->parent_pid = BPF_CORE_READ(parent, pid);
965*4882a593Smuzhiyun 	fork_data->parent_exec_id = BPF_CORE_READ(parent, self_exec_id);
966*4882a593Smuzhiyun 	fork_data->parent_start_time = BPF_CORE_READ(parent, start_time);
967*4882a593Smuzhiyun 	bpf_stats_pre_submit_var_perf_event(&stats_ctx, &fork_data->meta);
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	unsigned long data_len = payload - (void*)fork_data;
970*4882a593Smuzhiyun 	data_len = data_len > sizeof(*fork_data) ? sizeof(*fork_data) : data_len;
971*4882a593Smuzhiyun 	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, fork_data, data_len);
972*4882a593Smuzhiyun out:
973*4882a593Smuzhiyun 	bpf_stats_exit(&stats_ctx);
974*4882a593Smuzhiyun 	return 0;
975*4882a593Smuzhiyun }
976*4882a593Smuzhiyun char _license[] SEC("license") = "GPL";
977