xref: /OK3568_Linux_fs/kernel/tools/lib/bpf/libbpf_internal.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2*4882a593Smuzhiyun 
3*4882a593Smuzhiyun /*
4*4882a593Smuzhiyun  * Internal libbpf helpers.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (c) 2019 Facebook
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #ifndef __LIBBPF_LIBBPF_INTERNAL_H
10*4882a593Smuzhiyun #define __LIBBPF_LIBBPF_INTERNAL_H
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <stdlib.h>
13*4882a593Smuzhiyun #include <limits.h>
14*4882a593Smuzhiyun 
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

/* prevent accidental re-addition of reallocarray(); use the
 * libbpf_reallocarray() wrapper defined below instead
 */
#pragma GCC poison reallocarray
21*4882a593Smuzhiyun #include "libbpf.h"
22*4882a593Smuzhiyun 
/* Helpers for hand-constructing raw BTF type descriptors as flat __u32
 * initializer lists. The bit layout mirrors struct btf_type and friends
 * from the UAPI BTF encoding -- do not alter the shifts/masks.
 */
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
	((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
/* The three leading __u32 words common to every BTF type */
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
/* The extra metadata word that follows a BTF_KIND_INT type */
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
	((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
/* Complete BTF_KIND_INT type: base words plus the int-metadata word */
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
	BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
	BTF_INT_ENC(encoding, bits_offset, bits)
/* Trailing per-member/param/secinfo words for composite kinds */
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
34*4882a593Smuzhiyun 
/* Branch-prediction hints; guarded so an existing definition wins */
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
#endif
#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
/* NOTE: min()/max() evaluate each argument more than once -- never pass
 * expressions with side effects (e.g. i++, function calls).
 */
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
/* Byte offset of the first byte just past FIELD within TYPE */
#ifndef offsetofend
# define offsetofend(TYPE, FIELD) \
	(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
51*4882a593Smuzhiyun 
/* Symbol versioning is different between static and shared library.
 * Properly versioned symbols are needed for shared library, but
 * only the symbol of the new version is needed for static library.
 */
#ifdef SHARED
/* Shared build: emit .symver directives binding internal_name to
 * api_name@version; "@@" marks the default version new links resolve to.
 */
# define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
# define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);
#else
/* Static build: no versioned compat symbols; alias the public API name
 * directly to the newest internal implementation.
 */
# define COMPAT_VERSION(internal_name, api_name, version)
# define DEFAULT_VERSION(internal_name, api_name, version) \
	extern typeof(internal_name) api_name \
	__attribute__((alias(#internal_name)));
#endif
67*4882a593Smuzhiyun 
/* Central logging entry point (defined elsewhere in libbpf; presumably
 * it applies level filtering and the user-installed print callback).
 * The format attribute lets the compiler type-check fmt arguments.
 */
extern void libbpf_print(enum libbpf_print_level level,
			 const char *format, ...)
	__attribute__((format(printf, 2, 3)));

/* All libbpf log output is prefixed with "libbpf: " for attribution */
#define __pr(level, fmt, ...)	\
do {				\
	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_warn(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
80*4882a593Smuzhiyun 
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
/*
 * Re-implement glibc's reallocarray() for libbpf internal-only use.
 * reallocarray(), unfortunately, is not available in all versions of glibc,
 * so requires extra feature detection and using reallocarray() stub from
 * <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
 * build of libbpf unnecessarily and is just a maintenance burden. Instead,
 * it's trivial to implement libbpf-specific internal version and use it
 * throughout libbpf.
 *
 * Behaves like realloc(ptr, nmemb * size), except that it returns NULL
 * (without touching ptr) if nmemb * size would overflow size_t.
 */
static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
{
	size_t total;

#if __has_builtin(__builtin_mul_overflow)
	if (__builtin_mul_overflow(nmemb, size, &total))
		return NULL;
#else
	/* size == 0 can't overflow (product is 0); only guard the
	 * division against it. Rejecting zero-size requests here would
	 * be inconsistent with the __builtin_mul_overflow() path above.
	 */
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;
	total = nmemb * size;
#endif
	return realloc(ptr, total);
}
107*4882a593Smuzhiyun 
/* Dynamic-array growth helpers (implemented elsewhere, not visible here).
 * Going by the signatures: btf_add_mem() appears to grow *data (capacity
 * tracked in *cap_cnt, elements of elem_sz bytes) to fit add_cnt more
 * elements beyond cur_cnt, capped at max_cnt, returning a pointer to the
 * newly available space or NULL; btf_ensure_mem() ensures capacity for
 * need_cnt elements, returning 0 or a negative error.
 * TODO(review): confirm exact contracts against the definitions in btf.c.
 */
void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
		  size_t cur_cnt, size_t max_cnt, size_t add_cnt);
int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
111*4882a593Smuzhiyun 
/* Sanity-check a user-supplied "opts" struct of user_sz bytes against
 * the opts_sz bytes this version of libbpf understands. Returns true if
 * the struct is acceptable; logs a warning and returns false otherwise.
 */
static inline bool libbpf_validate_opts(const char *opts,
					size_t opts_sz, size_t user_sz,
					const char *type_name)
{
	size_t off;

	/* the struct must at least contain its leading size_t sz field */
	if (user_sz < sizeof(size_t)) {
		pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
		return false;
	}

	/* Bytes beyond what this libbpf knows about must all be zero,
	 * otherwise the caller set options we cannot honor. When
	 * user_sz <= opts_sz the loop body never executes.
	 */
	for (off = opts_sz; off < user_sz; off++) {
		if (opts[off]) {
			pr_warn("%s has non-zero extra bytes\n",
				type_name);
			return false;
		}
	}

	return true;
}
133*4882a593Smuzhiyun 
/* Accessors for forward/backward-compatible "opts" structs, whose first
 * field is "size_t sz" set by the caller to sizeof their struct.
 * A NULL opts pointer is always considered valid.
 * (Fixed macro hygiene: opts/value/fallback_value are now parenthesized
 * so expansions bind correctly with operator-bearing arguments.)
 */
#define OPTS_VALID(opts, type)						      \
	(!(opts) || libbpf_validate_opts((const char *)(opts),		      \
					 offsetofend(struct type,	      \
						     type##__last_field),     \
					 (opts)->sz, #type))
/* true iff the caller's struct is big enough to contain 'field' */
#define OPTS_HAS(opts, field) \
	((opts) && (opts)->sz >= offsetofend(typeof(*(opts)), field))
/* read 'field' if present, otherwise yield fallback_value */
#define OPTS_GET(opts, field, fallback_value) \
	(OPTS_HAS(opts, field) ? (opts)->field : (fallback_value))
/* write 'field' only if the caller's struct actually contains it */
#define OPTS_SET(opts, field, value)		\
	do {					\
		if (OPTS_HAS(opts, field))	\
			(opts)->field = (value);	\
	} while (0)
148*4882a593Smuzhiyun 
/* CPU-mask parsing helpers: produce a heap-allocated bool-per-CPU array
 * in *mask with its length in *mask_sz; caller presumably frees *mask.
 * parse_cpu_mask_file() reads the mask string from file fcpu.
 * TODO(review): confirm ownership/error conventions against libbpf.c.
 */
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
/* Load a raw BTF blob (types + string section) into the kernel */
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
			 const char *str_sec, size_t str_len);

/* Look up an ELF section's size / a variable's offset within obj by name */
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size);
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off);
158*4882a593Smuzhiyun 
/* One parsed .BTF.ext subsection (func_info, line_info or core_relo) */
struct btf_ext_info {
	/*
	 * info points to the individual info section (e.g. func_info and
	 * line_info) from the .BTF.ext. It does not include the __u32 rec_size.
	 */
	void *info;
	/* stride of a single record within this subsection */
	__u32 rec_size;
	/* total byte length of the subsection pointed to by info */
	__u32 len;
};

/* Iterate every btf_ext_info_sec in segment 'seg'. Relies on the GNU
 * extension treating void-pointer arithmetic as byte arithmetic.
 */
#define for_each_btf_ext_sec(seg, sec)					\
	for (sec = (seg)->info;						\
	     (void *)sec < (seg)->info + (seg)->len;			\
	     sec = (void *)sec + sizeof(struct btf_ext_info_sec) +	\
		   (seg)->rec_size * sec->num_info)

/* Iterate the records of one section 'sec'; 'rec' advances by the
 * segment's rec_size, which may be larger than sizeof(*rec).
 */
#define for_each_btf_ext_rec(seg, sec, i, rec)				\
	for (i = 0, rec = (void *)&(sec)->data;				\
	     i < (sec)->num_info;					\
	     i++, rec = (void *)rec + (seg)->rec_size)
179*4882a593Smuzhiyun 
/*
 * The .BTF.ext ELF section layout defined as
 *   struct btf_ext_header
 *   func_info subsection
 *
 * The func_info subsection layout:
 *   record size for struct bpf_func_info in the func_info subsection
 *   struct btf_sec_func_info for section #1
 *   a list of bpf_func_info records for section #1
 *     where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
 *     but may not be identical
 *   struct btf_sec_func_info for section #2
 *   a list of bpf_func_info records for section #2
 *   ......
 *
 * Note that the bpf_func_info record size in .BTF.ext may not
 * be the same as the one defined in include/uapi/linux/bpf.h.
 * The loader should ensure that record_size meets minimum
 * requirement and pass the record as is to the kernel. The
 * kernel will handle the func_info properly based on its contents.
 */
struct btf_ext_header {
	__u16	magic;
	__u8	version;
	__u8	flags;
	/* length of this header; lets newer headers grow past old readers */
	__u32	hdr_len;

	/* All offsets are in bytes relative to the end of this header */
	__u32	func_info_off;
	__u32	func_info_len;
	__u32	line_info_off;
	__u32	line_info_len;

	/* optional part of .BTF.ext header */
	__u32	core_relo_off;
	__u32	core_relo_len;
};
217*4882a593Smuzhiyun 
/* In-memory representation of a loaded .BTF.ext section */
struct btf_ext {
	union {
		struct btf_ext_header *hdr;
		void *data;	/* same raw buffer, untyped view */
	};
	struct btf_ext_info func_info;
	struct btf_ext_info line_info;
	struct btf_ext_info core_relo_info;
	/* total byte size of the raw buffer behind data/hdr */
	__u32 data_size;
};
228*4882a593Smuzhiyun 
/* Per-ELF-section group of records inside a .BTF.ext subsection */
struct btf_ext_info_sec {
	/* offset of the ELF section name string */
	__u32	sec_name_off;
	/* number of records that follow for this section */
	__u32	num_info;
	/* Followed by num_info * record_size number of bytes */
	__u8	data[];
};

/* The minimum bpf_func_info checked by the loader */
struct bpf_func_info_min {
	__u32   insn_off;	/* instruction offset the func starts at */
	__u32   type_id;	/* BTF type ID of the function */
};

/* The minimum bpf_line_info checked by the loader */
struct bpf_line_info_min {
	__u32	insn_off;
	__u32	file_name_off;	/* file name, as BTF string offset */
	__u32	line_off;	/* source line, as BTF string offset */
	__u32	line_col;	/* packed line/column info */
};
249*4882a593Smuzhiyun 
/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
 * has to be adjusted by relocations.
 *
 * The numeric values are part of the .BTF.ext on-disk encoding emitted
 * by the compiler -- do not renumber or reorder.
 */
enum bpf_core_relo_kind {
	BPF_FIELD_BYTE_OFFSET = 0,	/* field byte offset */
	BPF_FIELD_BYTE_SIZE = 1,	/* field size in bytes */
	BPF_FIELD_EXISTS = 2,		/* field existence in target kernel */
	BPF_FIELD_SIGNED = 3,		/* field signedness (0 - unsigned, 1 - signed) */
	BPF_FIELD_LSHIFT_U64 = 4,	/* bitfield-specific left bitshift */
	BPF_FIELD_RSHIFT_U64 = 5,	/* bitfield-specific right bitshift */
	BPF_TYPE_ID_LOCAL = 6,		/* type ID in local BPF object */
	BPF_TYPE_ID_TARGET = 7,		/* type ID in target kernel */
	BPF_TYPE_EXISTS = 8,		/* type existence in target kernel */
	BPF_TYPE_SIZE = 9,		/* type size in bytes */
	BPF_ENUMVAL_EXISTS = 10,	/* enum value existence in target kernel */
	BPF_ENUMVAL_VALUE = 11,		/* enum value integer value */
};
267*4882a593Smuzhiyun 
/* The minimum bpf_core_relo checked by the loader
 *
 * CO-RE relocation captures the following data:
 * - insn_off - instruction offset (in bytes) within a BPF program that needs
 *   its insn->imm field to be relocated with actual field info;
 * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
 *   type or field;
 * - access_str_off - offset into corresponding .BTF string section. String
 *   interpretation depends on specific relocation kind:
 *     - for field-based relocations, string encodes an accessed field using
 *     a sequence of field and array indices, separated by colon (:). It's
 *     conceptually very close to LLVM's getelementptr ([0]) instruction's
 *     arguments for identifying offset to a field.
 *     - for type-based relocations, strings is expected to be just "0";
 *     - for enum value-based relocations, string contains an index of enum
 *     value within its enum type;
 *
 * Example to provide a better feel.
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample *s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 *
 * type_id for all relocs in this example  will capture BTF type id of
 * `struct sample`.
 *
 * Such relocation is emitted when using __builtin_preserve_access_index()
 * Clang built-in, passing expression that captures field address, e.g.:
 *
 * bpf_probe_read(&dst, sizeof(dst),
 *		  __builtin_preserve_access_index(&src->a.b.c));
 *
 * In this case Clang will emit field relocation recording necessary data to
 * be able to find offset of embedded `a.b.c` field within `src` struct.
 *
 *   [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
 */
struct bpf_core_relo {
	__u32   insn_off;	/* byte offset of the instruction to patch */
	__u32   type_id;	/* BTF type ID of the root entity */
	__u32   access_str_off;	/* access spec, as .BTF string offset */
	enum bpf_core_relo_kind kind;	/* which aspect to relocate */
};
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun #endif /* __LIBBPF_LIBBPF_INTERNAL_H */
322