/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Example wrapper around BPF macros.
 *
 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
 * Author: Will Drewry <wad@chromium.org>
 *
 * The code may be used by anyone for any purpose,
 * and can serve as a starting point for developing
 * applications using prctl(PR_SET_SECCOMP, 2, ...).
 *
 * No guarantees are provided with respect to the correctness
 * or functionality of this code.
 */
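/*
 * Minimal usage sketch (an illustration, not part of this header): the
 * macros below are meant to be composed into a sock_filter array and
 * installed with prctl().  When JUMP()/LABEL() are used, the array must
 * first be passed through bpf_resolve_jumps().
 *
 *	struct sock_filter filter[] = {
 *		LOAD_SYSCALL_NR,
 *		SYSCALL(__NR_exit, ALLOW),
 *		SYSCALL(__NR_read, ALLOW),
 *		SYSCALL(__NR_write, ALLOW),
 *		DENY,
 *	};
 *	struct sock_fprog prog = {
 *		.len = (unsigned short)(sizeof(filter) / sizeof(filter[0])),
 *		.filter = filter,
 *	};
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 */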
#ifndef __BPF_HELPER_H__
#define __BPF_HELPER_H__

#include <asm/bitsperlong.h>	/* for __BITS_PER_LONG */
#include <endian.h>
#include <linux/filter.h>
#include <linux/seccomp.h>	/* for seccomp_data */
#include <linux/types.h>
#include <linux/unistd.h>
#include <stddef.h>

#define BPF_LABELS_MAX 256
struct bpf_labels {
	int count;
	struct __bpf_label {
		const char *label;
		__u32 location;
	} labels[BPF_LABELS_MAX];
};

int bpf_resolve_jumps(struct bpf_labels *labels,
		      struct sock_filter *filter, size_t count);
__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
void seccomp_bpf_print(struct sock_filter *filter, size_t count);

/*
 * Placeholder jt/jf markers: JUMP() and LABEL() emit these values so that
 * bpf_resolve_jumps() can recognize the pseudo-instructions and rewrite
 * them into real jump offsets once every label location is known.
 */
#define JUMP_JT 0xff
#define JUMP_JF 0xff
#define LABEL_JT 0xfe
#define LABEL_JF 0xfe

#define ALLOW \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
#define DENY \
	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
#define JUMP(labels, label) \
	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
		 JUMP_JT, JUMP_JF)
#define LABEL(labels, label) \
	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
		 LABEL_JT, LABEL_JF)
#define SYSCALL(nr, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
	jt

/* Lame, but just an example */
#define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)

#define EXPAND(...) __VA_ARGS__

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#elif __BYTE_ORDER == __BIG_ENDIAN
#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#else
#error "Unknown endianness"
#endif
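/*
 * For illustration: with the seccomp_data layout (nr, arch,
 * instruction_pointer, args[6]), args[1] begins at byte offset 24, so
 * LO_ARG(1) is 24 on little-endian targets and 28 on big-endian ones.
 */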

/* Map all width-sensitive operations */
#if __BITS_PER_LONG == 32

#define JEQ(x, jt) JEQ32(x, EXPAND(jt))
#define JNE(x, jt) JNE32(x, EXPAND(jt))
#define JGT(x, jt) JGT32(x, EXPAND(jt))
#define JLT(x, jt) JLT32(x, EXPAND(jt))
#define JGE(x, jt) JGE32(x, EXPAND(jt))
#define JLE(x, jt) JLE32(x, EXPAND(jt))
#define JA(x, jt) JA32(x, EXPAND(jt))
#define ARG(i) ARG_32(i)

#elif __BITS_PER_LONG == 64

/* Ensure that we load the logically correct offset. */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define ENDIAN(_lo, _hi) _lo, _hi
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
#elif __BYTE_ORDER == __BIG_ENDIAN
#define ENDIAN(_lo, _hi) _hi, _lo
#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
#endif

union arg64 {
	struct {
		__u32 ENDIAN(lo32, hi32);
	};
	__u64 u64;
};
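/*
 * Illustration only: because of the ENDIAN() ordering above, .lo32 always
 * holds the numerically low half and .hi32 the high half, regardless of
 * host byte order.  For example, ((union arg64){.u64 = 0x100000002ULL})
 * has .lo32 == 2 and .hi32 == 1 on both little- and big-endian builds.
 */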

#define JEQ(x, jt) \
	JEQ64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JGT(x, jt) \
	JGT64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JGE(x, jt) \
	JGE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JNE(x, jt) \
	JNE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JLT(x, jt) \
	JLT64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))
#define JLE(x, jt) \
	JLE64(((union arg64){.u64 = (x)}).lo32, \
	      ((union arg64){.u64 = (x)}).hi32, \
	      EXPAND(jt))

#define JA(x, jt) \
	JA64(((union arg64){.u64 = (x)}).lo32, \
	       ((union arg64){.u64 = (x)}).hi32, \
	       EXPAND(jt))
#define ARG(i) ARG_64(i)

#else
#error __BITS_PER_LONG value unusable.
#endif

/* Loads the arg into A */
#define ARG_32(idx) \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))

/* Loads lo into M[0] and hi into M[1] and A */
#define ARG_64(idx) \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
	BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
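/*
 * Usage sketch (illustration only, assuming a caller-provided
 * struct bpf_labels l): testing a syscall argument is a two-step pattern,
 * ARG(n) to load the argument, then a comparison macro on it.  For
 * example, to allow write(2) only on stdout/stderr one might write:
 *
 *	SYSCALL(__NR_write, JUMP(&l, write_fd)),
 *	DENY,
 *	LABEL(&l, write_fd),
 *	ARG(0),
 *	JEQ(STDOUT_FILENO, ALLOW),
 *	JEQ(STDERR_FILENO, ALLOW),
 *	DENY,
 */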

#define JEQ32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
	jt

#define JNE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
	jt

#define JA32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
	jt

#define JGE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
	jt

#define JGT32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
	jt

#define JLE32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
	jt

#define JLT32(value, jt) \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
	jt

/*
 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
 * A and M[1]. This invariant is kept by restoring A if necessary.
 */
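/*
 * For orientation (not generated code): JEQ64(lo, hi, jt) behaves roughly
 * like
 *
 *	if (arg.hi == hi && arg.lo == lo)
 *		jt;
 *
 * falling through on NOMATCH with A restored to hi; the other JXX64
 * macros follow the same high-word-first pattern.
 */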
#define JEQ64(lo, hi, jt) \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
	/* if (lo != arg.lo) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define JNE64(lo, hi, jt) \
	/* if (hi != arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo != arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define JA64(lo, hi, jt) \
	/* if (hi & arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo & arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define JGE64(lo, hi, jt) \
	/* if (hi > arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo >= arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define JGT64(lo, hi, jt) \
	/* if (hi > arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo > arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define JLE64(lo, hi, jt) \
	/* if (hi < arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo <= arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define JLT64(lo, hi, jt) \
	/* if (hi < arg.hi) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
	/* if (hi != arg.hi) goto NOMATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
	BPF_STMT(BPF_LD+BPF_MEM, 0), \
	/* if (lo < arg.lo) goto MATCH; */ \
	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
	BPF_STMT(BPF_LD+BPF_MEM, 1), \
	jt, \
	BPF_STMT(BPF_LD+BPF_MEM, 1)

#define LOAD_SYSCALL_NR \
	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
		 offsetof(struct seccomp_data, nr))

#endif  /* __BPF_HELPER_H__ */