// SPDX-License-Identifier: GPL-2.0-only
/* bpf_jit_comp.c: BPF JIT compiler
 *
 * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
 *
 * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
 * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>

#include "bpf_jit32.h"

static inline void bpf_flush_icache(void *start, void *end)
{
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
				   struct codegen_context *ctx)
{
	int i;
	const struct sock_filter *filter = fp->insns;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
		/* Make stackframe */
		if (ctx->seen & SEEN_DATAREF) {
			/* If we call any helpers (for loads), save LR */
			EMIT(PPC_INST_MFLR | __PPC_RT(R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);

			/* Back up non-volatile regs. */
			PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/*
			 * Conditionally save regs r15-r31 as some will be used
			 * for M[] data.
			 */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
			}
		}
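		/*
		 * Allocate the stack frame: the store-with-update pushes r1
		 * down by BPF_PPC_STACKFRAME and links the back chain.
		 */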
		PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
	}

	if (ctx->seen & SEEN_DATAREF) {
		/*
		 * If this filter needs to access skb data,
		 * prepare r_D and r_HL:
		 * r_HL = skb->len - skb->data_len
		 * r_D = skb->data
		 */
		PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
							 data_len));
		PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
		EMIT(PPC_RAW_SUB(r_HL, r_HL, r_scratch1));
		PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
	}

	if (ctx->seen & SEEN_XREG) {
		/*
		 * TODO: Could also detect whether first instr. sets X and
		 * avoid this (as below, with A).
		 */
		EMIT(PPC_RAW_LI(r_X, 0));
	}

	/* make sure we don't leak kernel information to user */
	if (bpf_needs_clear_a(&filter[0]))
		EMIT(PPC_RAW_LI(r_A, 0));
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
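		/*
		 * Pop our frame first; the save area sits at negative
		 * offsets from the restored r1, mirroring the prologue.
		 */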
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME));
		if (ctx->seen & SEEN_DATAREF) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
			PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
			PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
		}
		if (ctx->seen & SEEN_MEM) {
			/* Restore any saved non-vol registers */
			for (i = r_M; i < (r_M+16); i++) {
				if (ctx->seen & (1 << (i-r_M)))
					PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
			}
		}
	}
	/* The RETs have left a return value in R3. */

	EMIT(PPC_RAW_BLR());
}

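/*
 * Pick the skb-load helper variant from the sign of K: non-negative offsets
 * take the fast positive_offset path, offsets down to SKF_LL_OFF take the
 * negative_offset path, and anything below that falls back to the generic
 * helper.
 */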
#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
			      struct codegen_context *ctx,
			      unsigned int *addrs)
{
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;
	u8 *func;
	unsigned int true_cond;
	int i;

	/* Start of epilogue code */
	unsigned int exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		unsigned int K = filter[i].k;
		u16 code = bpf_anc_helper(&filter[i]);

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		switch (code) {
		/*** ALU ops ***/
		case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_ADD(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
			if (!K)
				break;
			EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(K)));
			if (K >= 32768)
				EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(K)));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_SUB(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
			if (!K)
				break;
			EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(-K)));
			if (K >= 32768)
				EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(-K)));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_MULW(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
			if (K < 32768)
				EMIT(PPC_RAW_MULI(r_A, r_A, K));
			else {
				PPC_LI32(r_scratch1, K);
				EMIT(PPC_RAW_MULW(r_A, r_A, r_scratch1));
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
		case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
			ctx->seen |= SEEN_XREG;
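			/*
			 * Classic BPF: a divide or modulus by zero makes the
			 * filter return 0, so test X and take the "return 0"
			 * path before dividing.
			 */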
			EMIT(PPC_RAW_CMPWI(r_X, 0));
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
				EMIT(PPC_RAW_LI(r_ret, 0));
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
				EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_X));
				EMIT(PPC_RAW_MULW(r_scratch1, r_X, r_scratch1));
				EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1));
			} else {
				EMIT(PPC_RAW_DIVWU(r_A, r_A, r_X));
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
			PPC_LI32(r_scratch2, K);
			EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_scratch2));
			EMIT(PPC_RAW_MULW(r_scratch1, r_scratch2, r_scratch1));
			EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1));
			break;
		case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
			if (K == 1)
				break;
			PPC_LI32(r_scratch1, K);
			EMIT(PPC_RAW_DIVWU(r_A, r_A, r_scratch1));
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_AND(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			if (!IMM_H(K))
				EMIT(PPC_RAW_ANDI(r_A, r_A, K));
			else {
				PPC_LI32(r_scratch1, K);
				EMIT(PPC_RAW_AND(r_A, r_A, r_scratch1));
			}
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_OR(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			if (IMM_L(K))
				EMIT(PPC_RAW_ORI(r_A, r_A, IMM_L(K)));
			if (K >= 65536)
				EMIT(PPC_RAW_ORIS(r_A, r_A, IMM_H(K)));
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_XOR(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
			if (IMM_L(K))
				EMIT(PPC_RAW_XORI(r_A, r_A, IMM_L(K)));
			if (K >= 65536)
				EMIT(PPC_RAW_XORIS(r_A, r_A, IMM_H(K)));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_SLW(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			if (K == 0)
				break;
			else
				EMIT(PPC_RAW_SLWI(r_A, r_A, K));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_SRW(r_A, r_A, r_X));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
			if (K == 0)
				break;
			else
				EMIT(PPC_RAW_SRWI(r_A, r_A, K));
			break;
		case BPF_ALU | BPF_NEG:
			EMIT(PPC_RAW_NEG(r_A, r_A));
			break;
		case BPF_RET | BPF_K:
			PPC_LI32(r_ret, K);
			if (!K) {
				if (ctx->pc_ret0 == -1)
					ctx->pc_ret0 = i;
			}
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue if we've stuff to clean up. Otherwise,
			 * if there's nothing to tidy, just return. If we /are/
			 * the last instruction, we're about to fall through to
			 * the epilogue to return.
			 */
			if (i != flen - 1) {
				/*
				 * Note: 'seen' is properly valid only on pass
				 * #2. Both parts of this conditional are the
				 * same instruction size though, meaning the
				 * first pass will still correctly determine the
				 * code size/addresses.
				 */
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					EMIT(PPC_RAW_BLR());
			}
			break;
		case BPF_RET | BPF_A:
			EMIT(PPC_RAW_MR(r_ret, r_A));
			if (i != flen - 1) {
				if (ctx->seen)
					PPC_JMP(exit_addr);
				else
					EMIT(PPC_RAW_BLR());
			}
			break;
		case BPF_MISC | BPF_TAX: /* X = A */
			EMIT(PPC_RAW_MR(r_X, r_A));
			break;
		case BPF_MISC | BPF_TXA: /* A = X */
			ctx->seen |= SEEN_XREG;
			EMIT(PPC_RAW_MR(r_A, r_X));
			break;

		/*** Constant loads/M[] access ***/
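		/*
		 * M[] scratch slots map straight onto registers r_M..r_M+15;
		 * record which slots are touched so the prologue/epilogue
		 * only save and restore the ones actually used.
		 */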
		case BPF_LD | BPF_IMM: /* A = K */
			PPC_LI32(r_A, K);
			break;
		case BPF_LDX | BPF_IMM: /* X = K */
			PPC_LI32(r_X, K);
			break;
		case BPF_LD | BPF_MEM: /* A = mem[K] */
			EMIT(PPC_RAW_MR(r_A, r_M + (K & 0xf)));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LDX | BPF_MEM: /* X = mem[K] */
			EMIT(PPC_RAW_MR(r_X, r_M + (K & 0xf)));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_ST: /* mem[K] = A */
			EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_A));
			ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_STX: /* mem[K] = X */
			EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_X));
			ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
			break;
		case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
			BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
			break;
		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
			PPC_LWZ_OFFS(r_A, r_skb, K);
			break;
		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
			break;

		/*** Ancillary info loads ***/
		case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
			BUILD_BUG_ON(sizeof_field(struct sk_buff,
						  protocol) != 2);
			PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							    protocol));
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
		case BPF_ANC | SKF_AD_HATYPE:
			BUILD_BUG_ON(sizeof_field(struct net_device,
						  ifindex) != 4);
			BUILD_BUG_ON(sizeof_field(struct net_device,
						  type) != 2);
			PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
								dev));
			EMIT(PPC_RAW_CMPDI(r_scratch1, 0));
			if (ctx->pc_ret0 != -1) {
				PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
			} else {
				/* Exit, returning 0; first pass hits here. */
				PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
				EMIT(PPC_RAW_LI(r_ret, 0));
				PPC_JMP(exit_addr);
			}
			if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
				PPC_LWZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, ifindex));
			} else {
				PPC_LHZ_OFFS(r_A, r_scratch1,
					     offsetof(struct net_device, type));
			}

			break;
		case BPF_ANC | SKF_AD_MARK:
			BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  mark));
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  hash));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
			BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  vlan_tci));
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
			if (PKT_VLAN_PRESENT_BIT)
				EMIT(PPC_RAW_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT));
			if (PKT_VLAN_PRESENT_BIT < 7)
				EMIT(PPC_RAW_ANDI(r_A, r_A, 1));
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			BUILD_BUG_ON(sizeof_field(struct sk_buff,
						  queue_mapping) != 2);
			PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
							  queue_mapping));
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
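			/*
			 * pkt_type is a 3-bit bitfield; on big-endian powerpc
			 * it sits in the high bits of its byte, hence the
			 * mask followed by the shift right.
			 */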
			PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
			EMIT(PPC_RAW_ANDI(r_A, r_A, PKT_TYPE_MAX));
			EMIT(PPC_RAW_SRWI(r_A, r_A, 5));
			break;
		case BPF_ANC | SKF_AD_CPU:
			PPC_BPF_LOAD_CPU(r_A);
			break;
		/*** Absolute loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_word);
			goto common_load;
		case BPF_LD | BPF_H | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_B | BPF_ABS:
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
		common_load:
			/* Load from [K]. */
			ctx->seen |= SEEN_DATAREF;
			PPC_FUNC_ADDR(r_scratch1, func);
			EMIT(PPC_RAW_MTLR(r_scratch1));
			PPC_LI32(r_addr, K);
			EMIT(PPC_RAW_BLRL());
			/*
			 * Helper returns 'lt' condition on error, and an
			 * appropriate return value in r3
			 */
			PPC_BCC(COND_LT, exit_addr);
			break;

		/*** Indirect loads from packet header/data ***/
		case BPF_LD | BPF_W | BPF_IND:
			func = sk_load_word;
			goto common_load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			func = sk_load_half;
			goto common_load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			func = sk_load_byte;
		common_load_ind:
			/*
			 * Load from [X + K]. Negative offsets are tested for
			 * in the helper functions.
			 */
			ctx->seen |= SEEN_DATAREF | SEEN_XREG;
			PPC_FUNC_ADDR(r_scratch1, func);
			EMIT(PPC_RAW_MTLR(r_scratch1));
			EMIT(PPC_RAW_ADDI(r_addr, r_X, IMM_L(K)));
			if (K >= 32768)
				EMIT(PPC_RAW_ADDIS(r_addr, r_addr, IMM_HA(K)));
			EMIT(PPC_RAW_BLRL());
			/* If error, cr0.LT set */
			PPC_BCC(COND_LT, exit_addr);
			break;

		case BPF_LDX | BPF_B | BPF_MSH:
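			/* X = 4 * (P[K] & 0xf); handled via the _msh load helper. */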
			func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
			goto common_load;
			break;

		/*** Jump and branches ***/
		case BPF_JMP | BPF_JA:
			if (K != 0)
				PPC_JMP(addrs[i + 1 + K]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			true_cond = COND_NE;
		cond_branch:
			/* same targets, can avoid doing the test :) */
			if (filter[i].jt == filter[i].jf) {
				if (filter[i].jt > 0)
					PPC_JMP(addrs[i + 1 + filter[i].jt]);
				break;
			}

			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
				ctx->seen |= SEEN_XREG;
				EMIT(PPC_RAW_CMPLW(r_A, r_X));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				ctx->seen |= SEEN_XREG;
				EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A, r_X));
				break;
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
				if (K < 32768)
					EMIT(PPC_RAW_CMPLWI(r_A, K));
				else {
					PPC_LI32(r_scratch1, K);
					EMIT(PPC_RAW_CMPLW(r_A, r_scratch1));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				if (K < 32768)
					/* PPC_ANDI is /only/ dot-form */
					EMIT(PPC_RAW_ANDI(r_scratch1, r_A, K));
				else {
					PPC_LI32(r_scratch1, K);
					EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A,
							     r_scratch1));
				}
				break;
			}
			/* Sometimes branches are constructed "backward", with
			 * the false path being the branch and true path being
			 * a fallthrough to the next instruction.
			 */
			if (filter[i].jt == 0)
				/* Swap the sense of the branch */
				PPC_BCC(true_cond ^ COND_CMP_TRUE,
					addrs[i + 1 + filter[i].jf]);
			else {
				PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
				if (filter[i].jf != 0)
					PPC_JMP(addrs[i + 1 + filter[i].jf]);
			}
			break;
		default:
			/* The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			if (printk_ratelimit())
				pr_err("BPF filter opcode %04x (@%d) unsupported\n",
				       filter[i].code, i);
			return -ENOTSUPP;
		}

	}
	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}

void bpf_jit_compile(struct bpf_prog *fp)
{
	unsigned int proglen;
	unsigned int alloclen;
	u32 *image = NULL;
	u32 *code_base;
	unsigned int *addrs;
	struct codegen_context cgctx;
	int pass;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/*
	 * There are multiple assembly passes as the generated code will change
	 * size as it settles down, figuring out the max branch offsets/exit
	 * paths required.
	 *
	 * The range of standard conditional branches is +/- 32Kbytes. Since
	 * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
	 * finish with 8 bytes/instruction. Not feasible, so long jumps are
	 * used, distinct from short branches.
	 *
	 * Current:
	 *
	 * For now, both branch types assemble to 2 words (short branches padded
	 * with a NOP); this is less efficient, but assembly will always complete
	 * after exactly 3 passes:
	 *
	 * First pass: No code buffer; Program is "faux-generated" -- no code
	 * emitted but maximum size of output determined (and addrs[] filled
	 * in). Also, we note whether we use M[], whether we use skb data, etc.
	 * All generation choices assumed to be 'worst-case', e.g. branches all
	 * far (2 instructions), return path code reduction not available, etc.
	 *
	 * Second pass: Code buffer allocated with size determined previously.
	 * Prologue generated to support features we have seen used. Exit paths
	 * determined and addrs[] is filled in again, as code may be slightly
	 * smaller as a result.
	 *
	 * Third pass: Code generated 'for real', and branch destinations
	 * determined from now-accurate addrs[] map.
	 *
	 * Ideal:
	 *
	 * If we optimise this, near branches will be shorter. On the
	 * first assembly pass, we should err on the side of caution and
	 * generate the biggest code. On subsequent passes, branches will be
	 * generated short or long and code size will reduce. With smaller
	 * code, more branches may fall into the short category, and code will
	 * reduce more.
	 *
	 * Finally, if we see one pass generate code the same size as the
	 * previous pass we have converged and should now generate code for
	 * real. Allocating at the end will also save the memory that would
	 * otherwise be wasted by the (small) current code shrinkage.
	 * Preferably, we should do a small number of passes (e.g. 5) and if we
	 * haven't converged by then, get impatient and force code to generate
	 * as-is, even if the odd branch would be left long. The chances of a
	 * long jump are tiny with all but the most enormous of BPF filter
	 * inputs, so we should usually converge on the third pass.
	 */

	cgctx.idx = 0;
	cgctx.seen = 0;
	cgctx.pc_ret0 = -1;
	/* Scouting faux-generate pass 0 */
	if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
		/* We hit something illegal or unsupported. */
		goto out;

	/*
	 * Pretend to build prologue, given the features we've seen. This will
	 * update cgctx.idx as it pretends to output instructions, then we can
	 * calculate total size from idx.
	 */
	bpf_jit_build_prologue(fp, 0, &cgctx);
	bpf_jit_build_epilogue(0, &cgctx);

	proglen = cgctx.idx * 4;
	alloclen = proglen + FUNCTION_DESCR_SIZE;
	image = module_alloc(alloclen);
	if (!image)
		goto out;

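	/* Instructions start after the (possibly zero-sized) function descriptor slot. */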
	code_base = image + (FUNCTION_DESCR_SIZE/4);

	/* Code generation passes 1-2 */
	for (pass = 1; pass < 3; pass++) {
		/* Now build the prologue, body code & epilogue for real. */
		cgctx.idx = 0;
		bpf_jit_build_prologue(fp, code_base, &cgctx);
		bpf_jit_build_body(fp, code_base, &cgctx, addrs);
		bpf_jit_build_epilogue(code_base, &cgctx);

		if (bpf_jit_enable > 1)
			pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
				proglen - (cgctx.idx * 4), cgctx.seen);
	}

	if (bpf_jit_enable > 1)
		/* Note that we output the base address of the code_base
		 * rather than image, since opcodes are in code_base.
		 */
		bpf_jit_dump(flen, proglen, pass, code_base);

	bpf_flush_icache(code_base, code_base + (proglen/4));

#ifdef CONFIG_PPC64
	/* Function descriptor nastiness: Address + TOC */
	((u64 *)image)[0] = (u64)code_base;
	((u64 *)image)[1] = local_paca->kernel_toc;
#endif

	fp->bpf_func = (void *)image;
	fp->jited = 1;

out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}