// SPDX-License-Identifier: GPL-2.0-only
/*
 * Just-In-Time compiler for eBPF filters on 32bit ARM
 *
 * Copyright (c) 2017 Shubham Bansal <illusionist.neo@gmail.com>
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 */

#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/hwcap.h>
#include <asm/opcodes.h>
#include <asm/system_info.h>

#include "bpf_jit_32.h"

/*
 * eBPF prog stack layout:
 *
 *                         high
 * original ARM_SP =>     +-----+
 *                        |     | callee saved registers
 *                        +-----+ <= (BPF_FP + SCRATCH_SIZE)
 *                        | ... | eBPF JIT scratch space
 * eBPF fp register =>    +-----+
 *   (BPF_FP)             | ... | eBPF prog stack
 *                        +-----+
 *                        |RSVD | JIT scratchpad
 * current ARM_SP =>      +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
 *                        | ... | caller-saved registers
 *                        +-----+
 *                        | ... | arguments passed on stack
 * ARM_SP during call =>  +-----+
 *                        |     |
 *                        | ... | Function call stack
 *                        |     |
 *                        +-----+
 *                          low
 *
 * The callee saved registers depend on whether frame pointers are enabled.
 * With frame pointers (to be compliant with the ABI):
 *
 *                              high
 * original ARM_SP =>     +--------------+ \
 *                        |      pc      | |
 * current ARM_FP =>      +--------------+ } callee saved registers
 *                        |r4-r9,fp,ip,lr| |
 *                        +--------------+ /
 *                              low
 *
 * Without frame pointers:
 *
 *                              high
 * original ARM_SP =>     +--------------+
 *                        |  r4-r9,fp,lr | callee saved registers
 * current ARM_FP =>      +--------------+
 *                              low
 *
 * When popping registers off the stack at the end of a BPF function, we
 * reference them via the current ARM_FP register.
 *
 * Some eBPF operations are implemented via a call to a helper function.
 * Such calls are "invisible" in the eBPF code, so it is up to the calling
 * program to preserve any caller-saved ARM registers during the call. The
 * JIT emits code to push and pop those registers onto the stack, immediately
 * above the callee stack frame.
 */
#define CALLEE_MASK	(1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
			 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \
			 1 << ARM_FP)
#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
#define CALLEE_POP_MASK  (CALLEE_MASK | 1 << ARM_PC)

#define CALLER_MASK	(1 << ARM_R0 | 1 << ARM_R1 | 1 << ARM_R2 | 1 << ARM_R3)
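
/*
 * For illustration: a push of CALLEE_PUSH_MASK assembles to
 * "push {r4-r9, fp, lr}" and the matching pop of CALLEE_POP_MASK to
 * "pop {r4-r9, fp, pc}", folding the return into the register restore.
 */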

enum {
	/* Stack layout - these are offsets from (top of stack - 4) */
	BPF_R2_HI,
	BPF_R2_LO,
	BPF_R3_HI,
	BPF_R3_LO,
	BPF_R4_HI,
	BPF_R4_LO,
	BPF_R5_HI,
	BPF_R5_LO,
	BPF_R7_HI,
	BPF_R7_LO,
	BPF_R8_HI,
	BPF_R8_LO,
	BPF_R9_HI,
	BPF_R9_LO,
	BPF_FP_HI,
	BPF_FP_LO,
	BPF_TC_HI,
	BPF_TC_LO,
	BPF_AX_HI,
	BPF_AX_LO,
	/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
	 * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
	 * BPF_REG_FP and the tail call count.
	 */
	BPF_JIT_SCRATCH_REGS,
};

/*
 * Negative "register" values indicate that the register is stored on the
 * stack and encode the offset from the top of the eBPF JIT scratch space.
 */
#define STACK_OFFSET(k)	(-4 - (k) * 4)
#define SCRATCH_SIZE	(BPF_JIT_SCRATCH_REGS * 4)
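
/*
 * For example, BPF_R2_HI is enum value 0 and BPF_R2_LO is 1, so
 * STACK_OFFSET(BPF_R2_HI) = -4 and STACK_OFFSET(BPF_R2_LO) = -8: the
 * high word of R2 sits at the very top of the scratch space with the
 * low word just below it.  With ten 64-bit values spilled,
 * SCRATCH_SIZE is 20 * 4 = 80 bytes.
 */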

#ifdef CONFIG_FRAME_POINTER
#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4)
#else
#define EBPF_SCRATCH_TO_ARM_FP(x) (x)
#endif
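
/*
 * For example, CALLEE_PUSH_MASK names eight registers (r4-r9, fp, lr),
 * so with frame pointers enabled this evaluates to (x) - 36: the eight
 * saved registers plus one extra word lie between ARM_FP and the top of
 * the eBPF JIT scratch space.
 */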

#define TMP_REG_1	(MAX_BPF_JIT_REG + 0)	/* TEMP Register 1 */
#define TMP_REG_2	(MAX_BPF_JIT_REG + 1)	/* TEMP Register 2 */
#define TCALL_CNT	(MAX_BPF_JIT_REG + 2)	/* Tail Call Count */

#define FLAG_IMM_OVERFLOW	(1 << 0)

/*
 * Map eBPF registers to ARM 32bit registers or stack scratch space.
 *
 * 1. The first argument is passed in ARM 32bit registers; the remaining
 * arguments are passed on the stack scratch space.
 * 2. The first callee-saved register is mapped to ARM 32bit registers;
 * the remaining ones are mapped to the stack scratch space.
 * 3. We need two 64 bit temp registers to do complex operations on eBPF
 * registers.
 *
 * As the eBPF registers are all 64 bit registers and ARM has only 32 bit
 * registers, we have to map each eBPF register to a pair of ARM 32 bit
 * registers or to scratch memory space, and build each eBPF 64 bit
 * register from those.
 */
static const s8 bpf2a32[][2] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = {ARM_R1, ARM_R0},
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = {ARM_R3, ARM_R2},
	/* Stored on stack scratch space */
	[BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)},
	[BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)},
	[BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)},
	[BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)},
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = {ARM_R5, ARM_R4},
	/* Stored on stack scratch space */
	[BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
	[BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
	[BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
	/* Read-only frame pointer used to access the stack */
	[BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)},
	/* Temporary registers for the internal BPF JIT; used
	 * for constant blinding among other things.
	 */
	[TMP_REG_1] = {ARM_R7, ARM_R6},
	[TMP_REG_2] = {ARM_R9, ARM_R8},
	/* Tail call count. Stored on stack scratch space. */
	[TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)},
	/* temporary register for blinding constants.
	 * Stored on stack scratch space.
	 */
	[BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
};
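
/*
 * Each pair is {high word, low word}: for example, the 64-bit BPF R0
 * lives in the ARM register pair r1:r0, while BPF R2 lives at two
 * negative stack offsets and must first be loaded into a temporary
 * pair (see arm_bpf_get_reg64() below) before it can be operated on.
 */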

#define	dst_lo	dst[1]
#define dst_hi	dst[0]
#define src_lo	src[1]
#define src_hi	src[0]

/*
 * JIT Context:
 *
 * prog			:	bpf_prog
 * idx			:	index of the current, i.e. last, JITed
 *				instruction.
 * prologue_bytes	:	bytes used in prologue.
 * epilogue_offset	:	offset at which the epilogue starts.
 * offsets		:	array of eBPF instruction offsets in
 *				JITed code.
 * target		:	final JITed code.
 * epilogue_bytes	:	number of bytes used in epilogue.
 * imm_count		:	number of immediates used for global
 *				variables.
 * imms			:	array of global variable addresses.
 */

struct jit_ctx {
	const struct bpf_prog *prog;
	unsigned int idx;
	unsigned int prologue_bytes;
	unsigned int epilogue_offset;
	unsigned int cpu_architecture;
	u32 flags;
	u32 *offsets;
	u32 *target;
	u32 stack_size;
#if __LINUX_ARM_ARCH__ < 7
	u16 epilogue_bytes;
	u16 imm_count;
	u32 *imms;
#endif
};

/*
 * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
 * (where assembly routines like __aeabi_uidiv could cause problems).
 */
static u32 jit_udiv32(u32 dividend, u32 divisor)
{
	return dividend / divisor;
}

static u32 jit_mod32(u32 dividend, u32 divisor)
{
	return dividend % divisor;
}

static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
{
	inst |= (cond << 28);
	inst = __opcode_to_mem_arm(inst);

	if (ctx->target != NULL)
		ctx->target[ctx->idx] = inst;

	ctx->idx++;
}

/*
 * Emit an instruction that will be executed unconditionally.
 */
static inline void emit(u32 inst, struct jit_ctx *ctx)
{
	_emit(ARM_COND_AL, inst, ctx);
}

/*
 * This is rather horrid, but necessary to convert an integer constant
 * to an immediate operand for the opcodes, and be able to detect at
 * build time whether the constant can't be converted (iow, usable in
 * BUILD_BUG_ON()).
 */
#define imm12val(v, s) (rol32(v, (s)) | (s) << 7)
#define const_imm8m(x)					\
	({ int r;					\
	   u32 v = (x);					\
	   if (!(v & ~0x000000ff))			\
		r = imm12val(v, 0);			\
	   else if (!(v & ~0xc000003f))			\
		r = imm12val(v, 2);			\
	   else if (!(v & ~0xf000000f))			\
		r = imm12val(v, 4);			\
	   else if (!(v & ~0xfc000003))			\
		r = imm12val(v, 6);			\
	   else if (!(v & ~0xff000000))			\
		r = imm12val(v, 8);			\
	   else if (!(v & ~0x3fc00000))			\
		r = imm12val(v, 10);			\
	   else if (!(v & ~0x0ff00000))			\
		r = imm12val(v, 12);			\
	   else if (!(v & ~0x03fc0000))			\
		r = imm12val(v, 14);			\
	   else if (!(v & ~0x00ff0000))			\
		r = imm12val(v, 16);			\
	   else if (!(v & ~0x003fc000))			\
		r = imm12val(v, 18);			\
	   else if (!(v & ~0x000ff000))			\
		r = imm12val(v, 20);			\
	   else if (!(v & ~0x0003fc00))			\
		r = imm12val(v, 22);			\
	   else if (!(v & ~0x0000ff00))			\
		r = imm12val(v, 24);			\
	   else if (!(v & ~0x00003fc0))			\
		r = imm12val(v, 26);			\
	   else if (!(v & ~0x00000ff0))			\
		r = imm12val(v, 28);			\
	   else if (!(v & ~0x000003fc))			\
		r = imm12val(v, 30);			\
	   else						\
		r = -1;					\
	   r; })

/*
 * Checks whether an immediate value can be encoded as a 12-bit (imm12)
 * modified-immediate operand.
 */
static int imm8m(u32 x)
{
	u32 rot;

	for (rot = 0; rot < 16; rot++)
		if ((x & ~ror32(0xff, 2 * rot)) == 0)
			return rol32(x, 2 * rot) | (rot << 8);
	return -1;
}

#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x))
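
/*
 * Worked example: 0xff000000 is 0xff rotated right by 8, so imm8m()
 * returns 0x4ff (rotation field 4, i.e. ror #8, with immediate 0xff).
 * A value such as 0x101 (bits 0 and 8 set) fits no even rotation of an
 * 8-bit field, so imm8m() returns -1 and callers fall back to
 * emit_mov_i_no8m().
 */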

static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12)
{
	op |= rt << 12 | rn << 16;
	if (imm12 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm12 = -imm12;
	return op | (imm12 & ARM_INST_LDST__IMM12);
}

static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8)
{
	op |= rt << 12 | rn << 16;
	if (imm8 >= 0)
		op |= ARM_INST_LDST__U;
	else
		imm8 = -imm8;
	return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f);
}

#define ARM_LDR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off)
#define ARM_LDRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off)
#define ARM_LDRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off)
#define ARM_LDRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off)

#define ARM_STR_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off)
#define ARM_STRB_I(rt, rn, off)	arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off)
#define ARM_STRD_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off)
#define ARM_STRH_I(rt, rn, off)	arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off)
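
/*
 * Both helpers encode the offset as a magnitude plus the U (add versus
 * subtract) bit. For example, ARM_LDRH_I(rt, rn, -12) clears U and
 * splits the magnitude 12 into the high/low imm4 nibbles of the ldrh
 * encoding, giving "ldrh rt, [rn, #-12]".
 */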

/*
 * Initializes the JIT space with undefined instructions.
 */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
/* EABI requires the stack to be aligned to 64-bit boundaries */
#define STACK_ALIGNMENT	8
#else
/* Stack must be aligned to 32-bit boundaries */
#define STACK_ALIGNMENT	4
#endif

/* total stack size used in JITed code */
#define _STACK_SIZE	(ctx->prog->aux->stack_depth + SCRATCH_SIZE)
#define STACK_SIZE	ALIGN(_STACK_SIZE, STACK_ALIGNMENT)

#if __LINUX_ARM_ARCH__ < 7

static u16 imm_offset(u32 k, struct jit_ctx *ctx)
{
	unsigned int i = 0, offset;
	u16 imm;

	/* on the "fake" run we just count them (duplicates included) */
	if (ctx->target == NULL) {
		ctx->imm_count++;
		return 0;
	}

	while ((i < ctx->imm_count) && ctx->imms[i]) {
		if (ctx->imms[i] == k)
			break;
		i++;
	}

	if (ctx->imms[i] == 0)
		ctx->imms[i] = k;

	/* constants go just after the epilogue */
	offset =  ctx->offsets[ctx->prog->len - 1] * 4;
	offset += ctx->prologue_bytes;
	offset += ctx->epilogue_bytes;
	offset += i * 4;

	ctx->target[offset / 4] = k;

	/* PC in ARM mode == address of the instruction + 8 */
	imm = offset - (8 + ctx->idx * 4);

	if (imm & ~0xfff) {
		/*
		 * The literal pool is too far away; signal it via the
		 * flags. Unfortunately we can only detect this on the
		 * second pass.
		 */
		ctx->flags |= FLAG_IMM_OVERFLOW;
		return 0;
	}

	return imm;
}

#endif /* __LINUX_ARM_ARCH__ */
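
/*
 * In other words, on pre-v7 cores constants live in a literal pool
 * placed right after the epilogue, and imm_offset() returns the
 * pc-relative displacement to the pool slot, so loading a constant
 * becomes "ldr rd, [pc, #imm]" (see emit_mov_i_no8m() below).
 */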

static inline int bpf2a32_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx) {
	int to, from;

	if (ctx->target == NULL)
		return 0;
	to = ctx->offsets[bpf_to];
	from = ctx->offsets[bpf_from];

	return to - from - 1;
}

/*
 * Move an immediate that's not an imm8m to a core register.
 */
static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 7
	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
#else
	emit(ARM_MOVW(rd, val & 0xffff), ctx);
	if (val > 0xffff)
		emit(ARM_MOVT(rd, val >> 16), ctx);
#endif
}

static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
{
	int imm12 = imm8m(val);

	if (imm12 >= 0)
		emit(ARM_MOV_I(rd, imm12), ctx);
	else
		emit_mov_i_no8m(rd, val, ctx);
}
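
/*
 * For example, emit_mov_i() emits a single "mov rd, #imm" for an
 * imm8m-encodable constant such as 0xff000000, while a constant like
 * 0x12345678 falls back to a movw/movt pair on ARMv7 or a pc-relative
 * literal-pool load on older cores.
 */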

static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
	if (elf_hwcap & HWCAP_THUMB)
		emit(ARM_BX(tgt_reg), ctx);
	else
		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
}

static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
#if __LINUX_ARM_ARCH__ < 5
	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
	emit_bx_r(tgt_reg, ctx);
#else
	emit(ARM_BLX_R(tgt_reg), ctx);
#endif
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to, from;
	/* Not needed during the first (dummy) pass. */
	if (ctx->target == NULL)
		return 0;
	to = ctx->epilogue_offset;
	from = ctx->idx;

	return to - from - 2;
}

static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
{
	const int exclude_mask = BIT(ARM_R0) | BIT(ARM_R1);
	const s8 *tmp = bpf2a32[TMP_REG_1];

#if __LINUX_ARM_ARCH__ == 7
	if (elf_hwcap & HWCAP_IDIVA) {
		if (op == BPF_DIV)
			emit(ARM_UDIV(rd, rm, rn), ctx);
		else {
			emit(ARM_UDIV(ARM_IP, rm, rn), ctx);
			emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx);
		}
		return;
	}
#endif

	/*
	 * For BPF_ALU | BPF_DIV | BPF_K instructions:
	 * ARM_R0 and ARM_R1 hold the first bpf function argument, so we
	 * must save them on the caller side to keep them from being
	 * clobbered within the callee.  After the return from the
	 * callee, ARM_R0 and ARM_R1 are restored.
	 */
	if (rn != ARM_R1) {
		emit(ARM_MOV_R(tmp[0], ARM_R1), ctx);
		emit(ARM_MOV_R(ARM_R1, rn), ctx);
	}
	if (rm != ARM_R0) {
		emit(ARM_MOV_R(tmp[1], ARM_R0), ctx);
		emit(ARM_MOV_R(ARM_R0, rm), ctx);
	}

	/* Push caller-saved registers on stack */
	emit(ARM_PUSH(CALLER_MASK & ~exclude_mask), ctx);

	/* Call appropriate function */
	emit_mov_i(ARM_IP, op == BPF_DIV ?
		   (u32)jit_udiv32 : (u32)jit_mod32, ctx);
	emit_blx_r(ARM_IP, ctx);

	/* Restore caller-saved registers from stack */
	emit(ARM_POP(CALLER_MASK & ~exclude_mask), ctx);

	/* Save return value */
	if (rd != ARM_R0)
		emit(ARM_MOV_R(rd, ARM_R0), ctx);

	/* Restore ARM_R0 and ARM_R1 */
	if (rn != ARM_R1)
		emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx);
	if (rm != ARM_R0)
		emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx);
}
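
/*
 * On a core without hardware divide this emits, roughly, the following
 * sequence for a BPF_MOD with rd/rm/rn outside r0/r1 (illustrative
 * only; the exact moves depend on the register assignment):
 *
 *	mov	r7, r1		@ save r1 in tmp
 *	mov	r1, rn		@ divisor
 *	mov	r6, r0		@ save r0 in tmp
 *	mov	r0, rm		@ dividend
 *	push	{r2, r3}	@ remaining caller-saved regs
 *	mov	ip, #jit_mod32	@ helper address (mov/movw+movt/ldr)
 *	blx	ip
 *	pop	{r2, r3}
 *	mov	rd, r0		@ result
 *	mov	r1, r7		@ restore r1
 *	mov	r0, r6		@ restore r0
 */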

/* Is the translated BPF register on stack? */
static bool is_stacked(s8 reg)
{
	return reg < 0;
}

/* If a BPF register is on the stack (is_stacked(reg)), load it into the
 * supplied temporary register and return the temporary register
 * for subsequent operations, otherwise just use the CPU register.
 */
static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx)
{
	if (is_stacked(reg)) {
		emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
		reg = tmp;
	}
	return reg;
}

static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp,
				   struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_LDRD_I(tmp[1], ARM_FP,
					EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_LDR_I(tmp[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_LDR_I(tmp[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
		reg = tmp;
	}
	return reg;
}

/* If a BPF register is on the stack (is_stacked(reg)), save the register
 * back to the stack.  If the source register is not the same, then
 * move it into the correct register.
 */
static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx)
{
	if (is_stacked(reg))
		emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx);
	else if (reg != src)
		emit(ARM_MOV_R(reg, src), ctx);
}

static void arm_bpf_put_reg64(const s8 *reg, const s8 *src,
			      struct jit_ctx *ctx)
{
	if (is_stacked(reg[1])) {
		if (__LINUX_ARM_ARCH__ >= 6 ||
		    ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) {
			emit(ARM_STRD_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
		} else {
			emit(ARM_STR_I(src[1], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx);
			emit(ARM_STR_I(src[0], ARM_FP,
				       EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx);
		}
	} else {
		if (reg[1] != src[1])
			emit(ARM_MOV_R(reg[1], src[1]), ctx);
		if (reg[0] != src[0])
			emit(ARM_MOV_R(reg[0], src[0]), ctx);
	}
}

static inline void emit_a32_mov_i(const s8 dst, const u32 val,
				  struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];

	if (is_stacked(dst)) {
		emit_mov_i(tmp[1], val, ctx);
		arm_bpf_put_reg32(dst, tmp[1], ctx);
	} else {
		emit_mov_i(dst, val, ctx);
	}
}

static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx)
{
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;

	emit_mov_i(rd[1], (u32)val, ctx);
	emit_mov_i(rd[0], val >> 32, ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* Sign extended move */
static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[],
				       const u32 val, struct jit_ctx *ctx) {
	u64 val64 = val;

	if (is64 && (val & (1<<31)))
		val64 |= 0xffffffff00000000ULL;
	emit_a32_mov_i64(dst, val64, ctx);
}
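
/*
 * Example: for a 64-bit ALU op with the 32-bit immediate 0xffffffff
 * (i.e. -1), bit 31 is set, so the value is widened to
 * 0xffffffffffffffff before both halves are materialized.
 */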

static inline void emit_a32_add_r(const u8 dst, const u8 src,
			      const bool is64, const bool hi,
			      struct jit_ctx *ctx) {
	/* 64 bit :
	 *	adds dst_lo, dst_lo, src_lo
	 *	adc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	add dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_ADDS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_ADC_R(dst, dst, src), ctx);
	else
		emit(ARM_ADD_R(dst, dst, src), ctx);
}

static inline void emit_a32_sub_r(const u8 dst, const u8 src,
				  const bool is64, const bool hi,
				  struct jit_ctx *ctx) {
	/* 64 bit :
	 *	subs dst_lo, dst_lo, src_lo
	 *	sbc dst_hi, dst_hi, src_hi
	 * 32 bit :
	 *	sub dst_lo, dst_lo, src_lo
	 */
	if (!hi && is64)
		emit(ARM_SUBS_R(dst, dst, src), ctx);
	else if (hi && is64)
		emit(ARM_SBC_R(dst, dst, src), ctx);
	else
		emit(ARM_SUB_R(dst, dst, src), ctx);
}

static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64,
			      const bool hi, const u8 op, struct jit_ctx *ctx){
	switch (BPF_OP(op)) {
	/* dst = dst + src */
	case BPF_ADD:
		emit_a32_add_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst - src */
	case BPF_SUB:
		emit_a32_sub_r(dst, src, is64, hi, ctx);
		break;
	/* dst = dst | src */
	case BPF_OR:
		emit(ARM_ORR_R(dst, dst, src), ctx);
		break;
	/* dst = dst & src */
	case BPF_AND:
		emit(ARM_AND_R(dst, dst, src), ctx);
		break;
	/* dst = dst ^ src */
	case BPF_XOR:
		emit(ARM_EOR_R(dst, dst, src), ctx);
		break;
	/* dst = dst * src */
	case BPF_MUL:
		emit(ARM_MUL(dst, dst, src), ctx);
		break;
	/* dst = dst << src */
	case BPF_LSH:
		emit(ARM_LSL_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src */
	case BPF_RSH:
		emit(ARM_LSR_R(dst, dst, src), ctx);
		break;
	/* dst = dst >> src (signed) */
	case BPF_ARSH:
		emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx);
		break;
	}
}

/* ALU operation (32 bit)
 * dst = dst (op) src
 */
static inline void emit_a32_alu_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx, const bool is64,
				  const bool hi, const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rn, rd;

	rn = arm_bpf_get_reg32(src, tmp[1], ctx);
	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);
	/* ALU operation */
	emit_alu_r(rd, rn, is64, hi, op, ctx);
	arm_bpf_put_reg32(dst, rd, ctx);
}

/* ALU operation (64 bit) */
static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
				  const s8 src[], struct jit_ctx *ctx,
				  const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	if (is64) {
		const s8 *rs;

		rs = arm_bpf_get_reg64(src, tmp2, ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs[1], true, false, op, ctx);
		emit_alu_r(rd[0], rs[0], true, true, op, ctx);
	} else {
		s8 rs;

		rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);

		/* ALU operation */
		emit_alu_r(rd[1], rs, true, false, op, ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}
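
/*
 * For a 64-bit BPF_ADD, for instance, the two emit_alu_r() calls above
 * expand to an adds/adc pair: the low-word add sets the carry flag
 * (hi == false) and the high-word add consumes it (hi == true).
 */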

/* dst = src (4 bytes) */
static inline void emit_a32_mov_r(const s8 dst, const s8 src,
				  struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rt;

	rt = arm_bpf_get_reg32(src, tmp[0], ctx);
	arm_bpf_put_reg32(dst, rt, ctx);
}

/* dst = src */
static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
				  const s8 src[],
				  struct jit_ctx *ctx) {
	if (!is64) {
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		if (!ctx->prog->aux->verifier_zext)
			/* Zero out high 4 bytes */
			emit_a32_mov_i(dst_hi, 0, ctx);
	} else if (__LINUX_ARM_ARCH__ < 6 &&
		   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
		/* complete 8 byte move */
		emit_a32_mov_r(dst_lo, src_lo, ctx);
		emit_a32_mov_r(dst_hi, src_hi, ctx);
	} else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
		const u8 *tmp = bpf2a32[TMP_REG_1];

		emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
		emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else if (is_stacked(src_lo)) {
		emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx);
	} else if (is_stacked(dst_lo)) {
		emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx);
	} else {
		emit(ARM_MOV_R(dst[0], src[0]), ctx);
		emit(ARM_MOV_R(dst[1], src[1]), ctx);
	}
}

/* Shift and negate operations with an immediate operand */
static inline void emit_a32_alu_i(const s8 dst, const u32 val,
				struct jit_ctx *ctx, const u8 op) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[0], ctx);

	/* Do shift operation */
	switch (op) {
	case BPF_LSH:
		emit(ARM_LSL_I(rd, rd, val), ctx);
		break;
	case BPF_RSH:
		emit(ARM_LSR_I(rd, rd, val), ctx);
		break;
	case BPF_ARSH:
		emit(ARM_ASR_I(rd, rd, val), ctx);
		break;
	case BPF_NEG:
		emit(ARM_RSB_I(rd, rd, val), ctx);
		break;
	}

	arm_bpf_put_reg32(dst, rd, ctx);
}

/* dst = -dst (64 bit) */
static inline void emit_a32_neg64(const s8 dst[],
				struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd;

	/* Setup Operand */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do Negate Operation */
	emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx);
	emit(ARM_RSC_I(rd[0], rd[0], 0), ctx);

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst << src */
static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}
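
/*
 * The sequence above computes, for a shift amount rt in [0, 63]:
 *
 *	dst_hi = (dst_hi << rt) | (dst_lo >> (32 - rt))
 *				| (dst_lo << (rt - 32))
 *	dst_lo = dst_lo << rt
 *
 * It relies on ARM register-specified shifts yielding zero for amounts
 * of 32 or more, so for rt < 32 the (rt - 32) term vanishes and for
 * rt >= 32 the other two terms do.
 */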

/* dst = dst >> src (signed) */
static inline void emit_a32_arsh_r64(const s8 dst[], const s8 src[],
				     struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do the ARSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	_emit(ARM_COND_PL,
	      ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst >> src */
static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;
	s8 rt;

	/* Setup Operands */
	rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do RSH operation */
	emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
	emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
	emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx);
	emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx);
	emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_LR, ctx);
	arm_bpf_put_reg32(dst_hi, ARM_IP, ctx);
}

/* dst = dst << val */
static inline void emit_a32_lsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSH operation */
	if (val < 32) {
		emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx);
		emit(ARM_ORR_SI(rd[0], tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx);
	} else {
		if (val == 32)
			emit(ARM_MOV_R(rd[0], rd[1]), ctx);
		else
			emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx);
		emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

/* dst = dst >> val */
static inline void emit_a32_rsh_i64(const s8 dst[],
				    const u32 val, struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do LSR operation */
	if (val == 0) {
		/* An immediate value of 0 encodes a shift amount of 32
		 * for LSR. To shift by 0, don't do anything.
		 */
	} else if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx);
		emit(ARM_MOV_I(rd[0], 0), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}
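
/*
 * Example expansions for the three non-trivial cases (illustrative):
 *
 *	val = 12:  lo = (lo >> 12) | (hi << 20);  hi = hi >> 12
 *	val = 32:  lo = hi;                       hi = 0
 *	val = 40:  lo = hi >> 8;                  hi = 0
 */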

/* dst = dst >> val (signed) */
static inline void emit_a32_arsh_i64(const s8 dst[],
				     const u32 val, struct jit_ctx *ctx){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd;

	/* Setup operands */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);

	/* Do ARSH operation */
	if (val == 0) {
		/* An immediate value of 0 encodes a shift amount of 32
		 * for ASR. To shift by 0, don't do anything.
		 */
	} else if (val < 32) {
		emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx);
		emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx);
	} else if (val == 32) {
		emit(ARM_MOV_R(rd[1], rd[0]), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	} else {
		emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx);
		emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx);
	}

	arm_bpf_put_reg64(dst, rd, ctx);
}

static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
				    struct jit_ctx *ctx) {
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *tmp2 = bpf2a32[TMP_REG_2];
	const s8 *rd, *rt;

	/* Setup operands for multiplication */
	rd = arm_bpf_get_reg64(dst, tmp, ctx);
	rt = arm_bpf_get_reg64(src, tmp2, ctx);

	/* Do Multiplication */
	emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx);
	emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx);
	emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);

	emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx);
	emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx);

	arm_bpf_put_reg32(dst_lo, ARM_IP, ctx);
	arm_bpf_put_reg32(dst_hi, rd[0], ctx);
}
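
/*
 * This is the schoolbook low-64-bit product: with dst = d_hi:d_lo and
 * src = s_hi:s_lo,
 *
 *	lo64(dst * src) = (d_lo * s_lo)
 *			  + ((d_hi * s_lo + d_lo * s_hi) << 32)
 *
 * The two 32x32->32 muls feed the high word, while the umull supplies
 * the low word plus its carry-out into the high word.
 */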

static bool is_ldst_imm(s16 off, const u8 size)
{
	s16 off_max = 0;

	switch (size) {
	case BPF_B:
	case BPF_W:
		off_max = 0xfff;
		break;
	case BPF_H:
		off_max = 0xff;
		break;
	case BPF_DW:
		/* Need to make sure off+4 does not overflow. */
		off_max = 0xfff - 4;
		break;
	}
	return -off_max <= off && off <= off_max;
}
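
/*
 * The limits mirror the instruction encodings used above: ldr/ldrb
 * take a 12-bit offset (+/-4095), ldrh only an 8-bit one (+/-255), and
 * doubleword accesses are split into two word accesses at off and
 * off + 4, so the second word must still fit.
 */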

/* *(size *)(dst + off) = src */
static inline void emit_str_r(const s8 dst, const s8 src[],
			      s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	s8 rd;

	rd = arm_bpf_get_reg32(dst, tmp[1], ctx);

	if (!is_ldst_imm(off, sz)) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
		rd = tmp[0];
		off = 0;
	}
	switch (sz) {
	case BPF_B:
		/* Store a Byte */
		emit(ARM_STRB_I(src_lo, rd, off), ctx);
		break;
	case BPF_H:
		/* Store a HalfWord */
		emit(ARM_STRH_I(src_lo, rd, off), ctx);
		break;
	case BPF_W:
		/* Store a Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		break;
	case BPF_DW:
		/* Store a Double Word */
		emit(ARM_STR_I(src_lo, rd, off), ctx);
		emit(ARM_STR_I(src_hi, rd, off + 4), ctx);
		break;
	}
}

/* dst = *(size*)(src + off) */
static inline void emit_ldx_r(const s8 dst[], const s8 src,
			      s16 off, struct jit_ctx *ctx, const u8 sz){
	const s8 *tmp = bpf2a32[TMP_REG_1];
	const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
	s8 rm = src;

	if (!is_ldst_imm(off, sz)) {
		emit_a32_mov_i(tmp[0], off, ctx);
		emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
		rm = tmp[0];
		off = 0;
	} else if (rd[1] == rm) {
		emit(ARM_MOV_R(tmp[0], rm), ctx);
		rm = tmp[0];
	}
	switch (sz) {
	case BPF_B:
		/* Load a Byte */
		emit(ARM_LDRB_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_H:
		/* Load a HalfWord */
		emit(ARM_LDRH_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_W:
		/* Load a Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		if (!ctx->prog->aux->verifier_zext)
			emit_a32_mov_i(rd[0], 0, ctx);
		break;
	case BPF_DW:
		/* Load a Double Word */
		emit(ARM_LDR_I(rd[1], rm, off), ctx);
		emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
		break;
	}
	arm_bpf_put_reg64(dst, rd, ctx);
}
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun /* Arithmetic operation */
1125*4882a593Smuzhiyun static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
1126*4882a593Smuzhiyun 			     const u8 rn, struct jit_ctx *ctx, u8 op,
1127*4882a593Smuzhiyun 			     bool is_jmp64) {
1128*4882a593Smuzhiyun 	switch (op) {
1129*4882a593Smuzhiyun 	case BPF_JSET:
1130*4882a593Smuzhiyun 		if (is_jmp64) {
1131*4882a593Smuzhiyun 			emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
1132*4882a593Smuzhiyun 			emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
1133*4882a593Smuzhiyun 			emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
1134*4882a593Smuzhiyun 		} else {
1135*4882a593Smuzhiyun 			emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx);
1136*4882a593Smuzhiyun 		}
1137*4882a593Smuzhiyun 		break;
1138*4882a593Smuzhiyun 	case BPF_JEQ:
1139*4882a593Smuzhiyun 	case BPF_JNE:
1140*4882a593Smuzhiyun 	case BPF_JGT:
1141*4882a593Smuzhiyun 	case BPF_JGE:
1142*4882a593Smuzhiyun 	case BPF_JLE:
1143*4882a593Smuzhiyun 	case BPF_JLT:
1144*4882a593Smuzhiyun 		if (is_jmp64) {
1145*4882a593Smuzhiyun 			emit(ARM_CMP_R(rd, rm), ctx);
1146*4882a593Smuzhiyun 			/* Only compare the low halves if the high halves are equal. */
1147*4882a593Smuzhiyun 			_emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
1148*4882a593Smuzhiyun 		} else {
1149*4882a593Smuzhiyun 			emit(ARM_CMP_R(rt, rn), ctx);
1150*4882a593Smuzhiyun 		}
1151*4882a593Smuzhiyun 		break;
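	/*
	 * For signed '>' and '<=' the comparison is done with the operands
	 * swapped; the caller then branches on the inverted ARM condition
	 * (LT for JSGT, GE for JSLE), i.e. dst > src is tested as src < dst.
	 */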
1152*4882a593Smuzhiyun 	case BPF_JSLE:
1153*4882a593Smuzhiyun 	case BPF_JSGT:
1154*4882a593Smuzhiyun 		emit(ARM_CMP_R(rn, rt), ctx);
1155*4882a593Smuzhiyun 		if (is_jmp64)
1156*4882a593Smuzhiyun 			emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
1157*4882a593Smuzhiyun 		break;
1158*4882a593Smuzhiyun 	case BPF_JSLT:
1159*4882a593Smuzhiyun 	case BPF_JSGE:
1160*4882a593Smuzhiyun 		emit(ARM_CMP_R(rt, rn), ctx);
1161*4882a593Smuzhiyun 		if (is_jmp64)
1162*4882a593Smuzhiyun 			emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
1163*4882a593Smuzhiyun 		break;
1164*4882a593Smuzhiyun 	}
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun static int out_offset = -1; /* initialized on the first pass of build_body() */
1168*4882a593Smuzhiyun static int emit_bpf_tail_call(struct jit_ctx *ctx)
1169*4882a593Smuzhiyun {
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
1172*4882a593Smuzhiyun 	const s8 *r2 = bpf2a32[BPF_REG_2];
1173*4882a593Smuzhiyun 	const s8 *r3 = bpf2a32[BPF_REG_3];
1174*4882a593Smuzhiyun 	const s8 *tmp = bpf2a32[TMP_REG_1];
1175*4882a593Smuzhiyun 	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1176*4882a593Smuzhiyun 	const s8 *tcc = bpf2a32[TCALL_CNT];
1177*4882a593Smuzhiyun 	const s8 *tc;
1178*4882a593Smuzhiyun 	const int idx0 = ctx->idx;
1179*4882a593Smuzhiyun #define cur_offset (ctx->idx - idx0)
1180*4882a593Smuzhiyun #define jmp_offset (out_offset - (cur_offset) - 2)
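	/*
	 * Branch offsets are counted in instructions and are relative to the
	 * ARM PC, which reads as the branch address + 8 (two instructions
	 * ahead), hence the -2 adjustment.
	 */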
1181*4882a593Smuzhiyun 	u32 lo, hi;
1182*4882a593Smuzhiyun 	s8 r_array, r_index;
1183*4882a593Smuzhiyun 	int off;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	/* if (index >= array->map.max_entries)
1186*4882a593Smuzhiyun 	 *	goto out;
1187*4882a593Smuzhiyun 	 */
1188*4882a593Smuzhiyun 	BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) >
1189*4882a593Smuzhiyun 		     ARM_INST_LDST__IMM12);
1190*4882a593Smuzhiyun 	off = offsetof(struct bpf_array, map.max_entries);
1191*4882a593Smuzhiyun 	r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx);
1192*4882a593Smuzhiyun 	/* index is 32-bit for arrays */
1193*4882a593Smuzhiyun 	r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx);
1194*4882a593Smuzhiyun 	/* array->map.max_entries */
1195*4882a593Smuzhiyun 	emit(ARM_LDR_I(tmp[1], r_array, off), ctx);
1196*4882a593Smuzhiyun 	/* index >= array->map.max_entries */
1197*4882a593Smuzhiyun 	emit(ARM_CMP_R(r_index, tmp[1]), ctx);
1198*4882a593Smuzhiyun 	_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	/* tmp2[0] = array, tmp2[1] = index */
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
1203*4882a593Smuzhiyun 	 *	goto out;
1204*4882a593Smuzhiyun 	 * tail_call_cnt++;
1205*4882a593Smuzhiyun 	 */
1206*4882a593Smuzhiyun 	lo = (u32)MAX_TAIL_CALL_CNT;
1207*4882a593Smuzhiyun 	hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
1208*4882a593Smuzhiyun 	tc = arm_bpf_get_reg64(tcc, tmp, ctx);
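	/*
	 * 64-bit unsigned compare of tcc against MAX_TAIL_CALL_CNT: the low
	 * words are compared only when the high words are equal, and HI then
	 * means tail_call_cnt > MAX_TAIL_CALL_CNT.
	 */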
1209*4882a593Smuzhiyun 	emit(ARM_CMP_I(tc[0], hi), ctx);
1210*4882a593Smuzhiyun 	_emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx);
1211*4882a593Smuzhiyun 	_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1212*4882a593Smuzhiyun 	emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx);
1213*4882a593Smuzhiyun 	emit(ARM_ADC_I(tc[0], tc[0], 0), ctx);
1214*4882a593Smuzhiyun 	arm_bpf_put_reg64(tcc, tmp, ctx);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	/* prog = array->ptrs[index]
1217*4882a593Smuzhiyun 	 * if (prog == NULL)
1218*4882a593Smuzhiyun 	 *	goto out;
1219*4882a593Smuzhiyun 	 */
1220*4882a593Smuzhiyun 	BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0);
1221*4882a593Smuzhiyun 	off = imm8m(offsetof(struct bpf_array, ptrs));
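	/*
	 * ptrs[] sits at a constant offset that must be encodable as a
	 * rotated 8-bit immediate (hence the imm8m() check); the entry is
	 * then loaded register-indexed as base + (index << 2), pointers
	 * being 4 bytes on arm32.
	 */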
1222*4882a593Smuzhiyun 	emit(ARM_ADD_I(tmp[1], r_array, off), ctx);
1223*4882a593Smuzhiyun 	emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx);
1224*4882a593Smuzhiyun 	emit(ARM_CMP_I(tmp[1], 0), ctx);
1225*4882a593Smuzhiyun 	_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	/* goto *(prog->bpf_func + prologue_size); */
1228*4882a593Smuzhiyun 	BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) >
1229*4882a593Smuzhiyun 		     ARM_INST_LDST__IMM12);
1230*4882a593Smuzhiyun 	off = offsetof(struct bpf_prog, bpf_func);
1231*4882a593Smuzhiyun 	emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx);
1232*4882a593Smuzhiyun 	emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
1233*4882a593Smuzhiyun 	emit_bx_r(tmp[1], ctx);
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	/* out: */
1236*4882a593Smuzhiyun 	if (out_offset == -1)
1237*4882a593Smuzhiyun 		out_offset = cur_offset;
1238*4882a593Smuzhiyun 	if (cur_offset != out_offset) {
1239*4882a593Smuzhiyun 		pr_err_once("tail_call out_offset = %d, expected %d!\n",
1240*4882a593Smuzhiyun 			    cur_offset, out_offset);
1241*4882a593Smuzhiyun 		return -1;
1242*4882a593Smuzhiyun 	}
1243*4882a593Smuzhiyun 	return 0;
1244*4882a593Smuzhiyun #undef cur_offset
1245*4882a593Smuzhiyun #undef jmp_offset
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun /* 0xabcd => 0xcdab */
1249*4882a593Smuzhiyun static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1250*4882a593Smuzhiyun {
1251*4882a593Smuzhiyun #if __LINUX_ARM_ARCH__ < 6
1252*4882a593Smuzhiyun 	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1253*4882a593Smuzhiyun 
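	/*
	 * Worked example for rn = 0xabcd: tmp2[1] = 0xcd, tmp2[0] = 0xab,
	 * rd = 0xab | (0xcd << 8) = 0xcdab. Bits above the low half-word
	 * of rd are cleared as a side effect.
	 */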
1254*4882a593Smuzhiyun 	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1255*4882a593Smuzhiyun 	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx);
1256*4882a593Smuzhiyun 	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1257*4882a593Smuzhiyun 	emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx);
1258*4882a593Smuzhiyun #else /* ARMv6+ */
1259*4882a593Smuzhiyun 	emit(ARM_REV16(rd, rn), ctx);
1260*4882a593Smuzhiyun #endif
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun /* 0xabcdefgh => 0xghefcdab */
1264*4882a593Smuzhiyun static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun #if __LINUX_ARM_ARCH__ < 6
1267*4882a593Smuzhiyun 	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1268*4882a593Smuzhiyun 
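	/*
	 * Worked example for rn = 0x11223344: ARM_IP ends up as 0x44000011
	 * (bytes 0 and 3 swapped into place), tmp2[0] as 0x00332200
	 * (bytes 1 and 2 swapped), and the final ORR yields 0x44332211.
	 */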
1269*4882a593Smuzhiyun 	emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx);
1270*4882a593Smuzhiyun 	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx);
1271*4882a593Smuzhiyun 	emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx);
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx);
1274*4882a593Smuzhiyun 	emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx);
1275*4882a593Smuzhiyun 	emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx);
1276*4882a593Smuzhiyun 	emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx);
1277*4882a593Smuzhiyun 	emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx);
1278*4882a593Smuzhiyun 	emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx);
1279*4882a593Smuzhiyun 	emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx);
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun #else /* ARMv6+ */
1282*4882a593Smuzhiyun 	emit(ARM_REV(rd, rn), ctx);
1283*4882a593Smuzhiyun #endif
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun // push a 64-bit scratch register pair onto the stack
1287*4882a593Smuzhiyun static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx)
1288*4882a593Smuzhiyun {
1289*4882a593Smuzhiyun 	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1290*4882a593Smuzhiyun 	const s8 *rt;
1291*4882a593Smuzhiyun 	u16 reg_set = 0;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	rt = arm_bpf_get_reg64(src, tmp2, ctx);
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	reg_set = (1 << rt[1]) | (1 << rt[0]);
1296*4882a593Smuzhiyun 	emit(ARM_PUSH(reg_set), ctx);
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun static void build_prologue(struct jit_ctx *ctx)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	const s8 arm_r0 = bpf2a32[BPF_REG_0][1];
1302*4882a593Smuzhiyun 	const s8 *bpf_r1 = bpf2a32[BPF_REG_1];
1303*4882a593Smuzhiyun 	const s8 *bpf_fp = bpf2a32[BPF_REG_FP];
1304*4882a593Smuzhiyun 	const s8 *tcc = bpf2a32[TCALL_CNT];
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	/* Save callee saved registers. */
1307*4882a593Smuzhiyun #ifdef CONFIG_FRAME_POINTER
1308*4882a593Smuzhiyun 	u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
1309*4882a593Smuzhiyun 	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
1310*4882a593Smuzhiyun 	emit(ARM_PUSH(reg_set), ctx);
1311*4882a593Smuzhiyun 	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
1312*4882a593Smuzhiyun #else
1313*4882a593Smuzhiyun 	emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
1314*4882a593Smuzhiyun 	emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
1315*4882a593Smuzhiyun #endif
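	/*
	 * BPF_REG_1 lives in the r3:r2 pair here (high:low), so the two
	 * instructions below materialize the 64-bit frame pointer value:
	 * high word zero, low word SP - SCRATCH_SIZE.
	 */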
1316*4882a593Smuzhiyun 	/* mov r3, #0 */
1317*4882a593Smuzhiyun 	/* sub r2, sp, #SCRATCH_SIZE */
1318*4882a593Smuzhiyun 	emit(ARM_MOV_I(bpf_r1[0], 0), ctx);
1319*4882a593Smuzhiyun 	emit(ARM_SUB_I(bpf_r1[1], ARM_SP, SCRATCH_SIZE), ctx);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	ctx->stack_size = imm8m(STACK_SIZE);
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	/* Set up function call stack */
1324*4882a593Smuzhiyun 	emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	/* Set up BPF prog stack base register */
1327*4882a593Smuzhiyun 	emit_a32_mov_r64(true, bpf_fp, bpf_r1, ctx);
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	/* Initialize the tail call count */
1330*4882a593Smuzhiyun 	emit(ARM_MOV_I(bpf_r1[1], 0), ctx);
1331*4882a593Smuzhiyun 	emit_a32_mov_r64(true, tcc, bpf_r1, ctx);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	/* Move BPF_CTX to BPF_R1 */
1334*4882a593Smuzhiyun 	emit(ARM_MOV_R(bpf_r1[1], arm_r0), ctx);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	/* end of prologue */
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun /* restore callee saved registers. */
1340*4882a593Smuzhiyun static void build_epilogue(struct jit_ctx *ctx)
1341*4882a593Smuzhiyun {
1342*4882a593Smuzhiyun #ifdef CONFIG_FRAME_POINTER
1343*4882a593Smuzhiyun 	/* When using frame pointers, some additional registers need to
1344*4882a593Smuzhiyun 	 * be loaded. */
1345*4882a593Smuzhiyun 	u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
1346*4882a593Smuzhiyun 	emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
1347*4882a593Smuzhiyun 	emit(ARM_LDM(ARM_SP, reg_set), ctx);
1348*4882a593Smuzhiyun #else
1349*4882a593Smuzhiyun 	/* Restore callee saved registers. */
1350*4882a593Smuzhiyun 	emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
1351*4882a593Smuzhiyun 	emit(ARM_POP(CALLEE_POP_MASK), ctx);
1352*4882a593Smuzhiyun #endif
1353*4882a593Smuzhiyun }
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun /*
1356*4882a593Smuzhiyun  * Convert an eBPF instruction to a native instruction, i.e.
1357*4882a593Smuzhiyun  * JIT one eBPF instruction.
1358*4882a593Smuzhiyun  * Returns:
1359*4882a593Smuzhiyun  *	0  - Successfully JITed an 8-byte eBPF instruction
1360*4882a593Smuzhiyun  *	>0 - Successfully JITed a 16-byte eBPF instruction
1361*4882a593Smuzhiyun  *	<0 - Failed to JIT.
1362*4882a593Smuzhiyun  */
1363*4882a593Smuzhiyun static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
1364*4882a593Smuzhiyun {
1365*4882a593Smuzhiyun 	const u8 code = insn->code;
1366*4882a593Smuzhiyun 	const s8 *dst = bpf2a32[insn->dst_reg];
1367*4882a593Smuzhiyun 	const s8 *src = bpf2a32[insn->src_reg];
1368*4882a593Smuzhiyun 	const s8 *tmp = bpf2a32[TMP_REG_1];
1369*4882a593Smuzhiyun 	const s8 *tmp2 = bpf2a32[TMP_REG_2];
1370*4882a593Smuzhiyun 	const s16 off = insn->off;
1371*4882a593Smuzhiyun 	const s32 imm = insn->imm;
1372*4882a593Smuzhiyun 	const int i = insn - ctx->prog->insnsi;
1373*4882a593Smuzhiyun 	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
1374*4882a593Smuzhiyun 	const s8 *rd, *rs;
1375*4882a593Smuzhiyun 	s8 rd_lo, rt, rm, rn;
1376*4882a593Smuzhiyun 	s32 jmp_offset;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun #define check_imm(bits, imm) do {				\
1379*4882a593Smuzhiyun 	if ((imm) >= (1 << ((bits) - 1)) ||			\
1380*4882a593Smuzhiyun 	    (imm) < -(1 << ((bits) - 1))) {			\
1381*4882a593Smuzhiyun 		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
1382*4882a593Smuzhiyun 			i, imm, imm);				\
1383*4882a593Smuzhiyun 		return -EINVAL;					\
1384*4882a593Smuzhiyun 	}							\
1385*4882a593Smuzhiyun } while (0)
1386*4882a593Smuzhiyun #define check_imm24(imm) check_imm(24, imm)
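/*
 * check_imm24() accepts -2^23 .. 2^23 - 1, matching the signed 24-bit
 * word offset field of the ARM branch instruction.
 */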
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	switch (code) {
1389*4882a593Smuzhiyun 	/* ALU operations */
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	/* dst = src */
1392*4882a593Smuzhiyun 	case BPF_ALU | BPF_MOV | BPF_K:
1393*4882a593Smuzhiyun 	case BPF_ALU | BPF_MOV | BPF_X:
1394*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_MOV | BPF_K:
1395*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_MOV | BPF_X:
1396*4882a593Smuzhiyun 		switch (BPF_SRC(code)) {
1397*4882a593Smuzhiyun 		case BPF_X:
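			/*
			 * The verifier encodes its zero-extension pseudo
			 * instruction as a 32-bit mov with imm == 1, so
			 * only the high word needs clearing here.
			 */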
1398*4882a593Smuzhiyun 			if (imm == 1) {
1399*4882a593Smuzhiyun 				/* Special mov32 for zext */
1400*4882a593Smuzhiyun 				emit_a32_mov_i(dst_hi, 0, ctx);
1401*4882a593Smuzhiyun 				break;
1402*4882a593Smuzhiyun 			}
1403*4882a593Smuzhiyun 			emit_a32_mov_r64(is64, dst, src, ctx);
1404*4882a593Smuzhiyun 			break;
1405*4882a593Smuzhiyun 		case BPF_K:
1406*4882a593Smuzhiyun 			/* Sign-extend immediate value to destination reg */
1407*4882a593Smuzhiyun 			emit_a32_mov_se_i64(is64, dst, imm, ctx);
1408*4882a593Smuzhiyun 			break;
1409*4882a593Smuzhiyun 		}
1410*4882a593Smuzhiyun 		break;
1411*4882a593Smuzhiyun 	/* dst = dst + src/imm */
1412*4882a593Smuzhiyun 	/* dst = dst - src/imm */
1413*4882a593Smuzhiyun 	/* dst = dst | src/imm */
1414*4882a593Smuzhiyun 	/* dst = dst & src/imm */
1415*4882a593Smuzhiyun 	/* dst = dst ^ src/imm */
1416*4882a593Smuzhiyun 	/* dst = dst * src/imm */
1417*4882a593Smuzhiyun 	/* dst = dst << src */
1418*4882a593Smuzhiyun 	/* dst = dst >> src */
1419*4882a593Smuzhiyun 	case BPF_ALU | BPF_ADD | BPF_K:
1420*4882a593Smuzhiyun 	case BPF_ALU | BPF_ADD | BPF_X:
1421*4882a593Smuzhiyun 	case BPF_ALU | BPF_SUB | BPF_K:
1422*4882a593Smuzhiyun 	case BPF_ALU | BPF_SUB | BPF_X:
1423*4882a593Smuzhiyun 	case BPF_ALU | BPF_OR | BPF_K:
1424*4882a593Smuzhiyun 	case BPF_ALU | BPF_OR | BPF_X:
1425*4882a593Smuzhiyun 	case BPF_ALU | BPF_AND | BPF_K:
1426*4882a593Smuzhiyun 	case BPF_ALU | BPF_AND | BPF_X:
1427*4882a593Smuzhiyun 	case BPF_ALU | BPF_XOR | BPF_K:
1428*4882a593Smuzhiyun 	case BPF_ALU | BPF_XOR | BPF_X:
1429*4882a593Smuzhiyun 	case BPF_ALU | BPF_MUL | BPF_K:
1430*4882a593Smuzhiyun 	case BPF_ALU | BPF_MUL | BPF_X:
1431*4882a593Smuzhiyun 	case BPF_ALU | BPF_LSH | BPF_X:
1432*4882a593Smuzhiyun 	case BPF_ALU | BPF_RSH | BPF_X:
1433*4882a593Smuzhiyun 	case BPF_ALU | BPF_ARSH | BPF_X:
1434*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_ADD | BPF_K:
1435*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_ADD | BPF_X:
1436*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_SUB | BPF_K:
1437*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_SUB | BPF_X:
1438*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_OR | BPF_K:
1439*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_OR | BPF_X:
1440*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_AND | BPF_K:
1441*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_AND | BPF_X:
1442*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_XOR | BPF_K:
1443*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_XOR | BPF_X:
1444*4882a593Smuzhiyun 		switch (BPF_SRC(code)) {
1445*4882a593Smuzhiyun 		case BPF_X:
1446*4882a593Smuzhiyun 			emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code));
1447*4882a593Smuzhiyun 			break;
1448*4882a593Smuzhiyun 		case BPF_K:
1449*4882a593Smuzhiyun 			/* Move the immediate value into the temporary
1450*4882a593Smuzhiyun 			 * register pair first: doing so sign-extends
1451*4882a593Smuzhiyun 			 * the immediate into the temporary pair, after
1452*4882a593Smuzhiyun 			 * which it is safe to do the ALU operation
1453*4882a593Smuzhiyun 			 * on it.
1454*4882a593Smuzhiyun 			 */
1455*4882a593Smuzhiyun 			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1456*4882a593Smuzhiyun 			emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code));
1457*4882a593Smuzhiyun 			break;
1458*4882a593Smuzhiyun 		}
1459*4882a593Smuzhiyun 		break;
1460*4882a593Smuzhiyun 	/* dst = dst / src(imm) */
1461*4882a593Smuzhiyun 	/* dst = dst % src(imm) */
1462*4882a593Smuzhiyun 	case BPF_ALU | BPF_DIV | BPF_K:
1463*4882a593Smuzhiyun 	case BPF_ALU | BPF_DIV | BPF_X:
1464*4882a593Smuzhiyun 	case BPF_ALU | BPF_MOD | BPF_K:
1465*4882a593Smuzhiyun 	case BPF_ALU | BPF_MOD | BPF_X:
1466*4882a593Smuzhiyun 		rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx);
1467*4882a593Smuzhiyun 		switch (BPF_SRC(code)) {
1468*4882a593Smuzhiyun 		case BPF_X:
1469*4882a593Smuzhiyun 			rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx);
1470*4882a593Smuzhiyun 			break;
1471*4882a593Smuzhiyun 		case BPF_K:
1472*4882a593Smuzhiyun 			rt = tmp2[0];
1473*4882a593Smuzhiyun 			emit_a32_mov_i(rt, imm, ctx);
1474*4882a593Smuzhiyun 			break;
1475*4882a593Smuzhiyun 		default:
1476*4882a593Smuzhiyun 			rt = src_lo;
1477*4882a593Smuzhiyun 			break;
1478*4882a593Smuzhiyun 		}
1479*4882a593Smuzhiyun 		emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code));
1480*4882a593Smuzhiyun 		arm_bpf_put_reg32(dst_lo, rd_lo, ctx);
1481*4882a593Smuzhiyun 		if (!ctx->prog->aux->verifier_zext)
1482*4882a593Smuzhiyun 			emit_a32_mov_i(dst_hi, 0, ctx);
1483*4882a593Smuzhiyun 		break;
1484*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_DIV | BPF_K:
1485*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_DIV | BPF_X:
1486*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_MOD | BPF_K:
1487*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_MOD | BPF_X:
1488*4882a593Smuzhiyun 		goto notyet;
1489*4882a593Smuzhiyun 	/* dst = dst << imm */
1490*4882a593Smuzhiyun 	/* dst = dst >> imm */
1491*4882a593Smuzhiyun 	/* dst = dst >> imm (signed) */
1492*4882a593Smuzhiyun 	case BPF_ALU | BPF_LSH | BPF_K:
1493*4882a593Smuzhiyun 	case BPF_ALU | BPF_RSH | BPF_K:
1494*4882a593Smuzhiyun 	case BPF_ALU | BPF_ARSH | BPF_K:
1495*4882a593Smuzhiyun 		if (unlikely(imm > 31))
1496*4882a593Smuzhiyun 			return -EINVAL;
1497*4882a593Smuzhiyun 		if (imm)
1498*4882a593Smuzhiyun 			emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code));
1499*4882a593Smuzhiyun 		if (!ctx->prog->aux->verifier_zext)
1500*4882a593Smuzhiyun 			emit_a32_mov_i(dst_hi, 0, ctx);
1501*4882a593Smuzhiyun 		break;
1502*4882a593Smuzhiyun 	/* dst = dst << imm */
1503*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_LSH | BPF_K:
1504*4882a593Smuzhiyun 		if (unlikely(imm > 63))
1505*4882a593Smuzhiyun 			return -EINVAL;
1506*4882a593Smuzhiyun 		emit_a32_lsh_i64(dst, imm, ctx);
1507*4882a593Smuzhiyun 		break;
1508*4882a593Smuzhiyun 	/* dst = dst >> imm */
1509*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_RSH | BPF_K:
1510*4882a593Smuzhiyun 		if (unlikely(imm > 63))
1511*4882a593Smuzhiyun 			return -EINVAL;
1512*4882a593Smuzhiyun 		emit_a32_rsh_i64(dst, imm, ctx);
1513*4882a593Smuzhiyun 		break;
1514*4882a593Smuzhiyun 	/* dst = dst << src */
1515*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_LSH | BPF_X:
1516*4882a593Smuzhiyun 		emit_a32_lsh_r64(dst, src, ctx);
1517*4882a593Smuzhiyun 		break;
1518*4882a593Smuzhiyun 	/* dst = dst >> src */
1519*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_RSH | BPF_X:
1520*4882a593Smuzhiyun 		emit_a32_rsh_r64(dst, src, ctx);
1521*4882a593Smuzhiyun 		break;
1522*4882a593Smuzhiyun 	/* dst = dst >> src (signed) */
1523*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_ARSH | BPF_X:
1524*4882a593Smuzhiyun 		emit_a32_arsh_r64(dst, src, ctx);
1525*4882a593Smuzhiyun 		break;
1526*4882a593Smuzhiyun 	/* dst = dst >> imm (signed) */
1527*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_ARSH | BPF_K:
1528*4882a593Smuzhiyun 		if (unlikely(imm > 63))
1529*4882a593Smuzhiyun 			return -EINVAL;
1530*4882a593Smuzhiyun 		emit_a32_arsh_i64(dst, imm, ctx);
1531*4882a593Smuzhiyun 		break;
1532*4882a593Smuzhiyun 	/* dst = -dst */
1533*4882a593Smuzhiyun 	case BPF_ALU | BPF_NEG:
1534*4882a593Smuzhiyun 		emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code));
1535*4882a593Smuzhiyun 		if (!ctx->prog->aux->verifier_zext)
1536*4882a593Smuzhiyun 			emit_a32_mov_i(dst_hi, 0, ctx);
1537*4882a593Smuzhiyun 		break;
1538*4882a593Smuzhiyun 	/* dst = -dst (64 bit) */
1539*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_NEG:
1540*4882a593Smuzhiyun 		emit_a32_neg64(dst, ctx);
1541*4882a593Smuzhiyun 		break;
1542*4882a593Smuzhiyun 	/* dst = dst * src/imm */
1543*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_MUL | BPF_X:
1544*4882a593Smuzhiyun 	case BPF_ALU64 | BPF_MUL | BPF_K:
1545*4882a593Smuzhiyun 		switch (BPF_SRC(code)) {
1546*4882a593Smuzhiyun 		case BPF_X:
1547*4882a593Smuzhiyun 			emit_a32_mul_r64(dst, src, ctx);
1548*4882a593Smuzhiyun 			break;
1549*4882a593Smuzhiyun 		case BPF_K:
1550*4882a593Smuzhiyun 			/* Move the immediate value into the temporary
1551*4882a593Smuzhiyun 			 * register pair first: doing so sign-extends
1552*4882a593Smuzhiyun 			 * the immediate into the temporary pair, after
1553*4882a593Smuzhiyun 			 * which it is safe to do the multiplication
1554*4882a593Smuzhiyun 			 * on it.
1555*4882a593Smuzhiyun 			 */
1556*4882a593Smuzhiyun 			emit_a32_mov_se_i64(is64, tmp2, imm, ctx);
1557*4882a593Smuzhiyun 			emit_a32_mul_r64(dst, tmp2, ctx);
1558*4882a593Smuzhiyun 			break;
1559*4882a593Smuzhiyun 		}
1560*4882a593Smuzhiyun 		break;
1561*4882a593Smuzhiyun 	/* dst = htole(dst) */
1562*4882a593Smuzhiyun 	/* dst = htobe(dst) */
1563*4882a593Smuzhiyun 	case BPF_ALU | BPF_END | BPF_FROM_LE:
1564*4882a593Smuzhiyun 	case BPF_ALU | BPF_END | BPF_FROM_BE:
1565*4882a593Smuzhiyun 		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1566*4882a593Smuzhiyun 		if (BPF_SRC(code) == BPF_FROM_LE)
1567*4882a593Smuzhiyun 			goto emit_bswap_uxt;
1568*4882a593Smuzhiyun 		switch (imm) {
1569*4882a593Smuzhiyun 		case 16:
1570*4882a593Smuzhiyun 			emit_rev16(rd[1], rd[1], ctx);
1571*4882a593Smuzhiyun 			goto emit_bswap_uxt;
1572*4882a593Smuzhiyun 		case 32:
1573*4882a593Smuzhiyun 			emit_rev32(rd[1], rd[1], ctx);
1574*4882a593Smuzhiyun 			goto emit_bswap_uxt;
1575*4882a593Smuzhiyun 		case 64:
1576*4882a593Smuzhiyun 			emit_rev32(ARM_LR, rd[1], ctx);
1577*4882a593Smuzhiyun 			emit_rev32(rd[1], rd[0], ctx);
1578*4882a593Smuzhiyun 			emit(ARM_MOV_R(rd[0], ARM_LR), ctx);
1579*4882a593Smuzhiyun 			break;
1580*4882a593Smuzhiyun 		}
1581*4882a593Smuzhiyun 		goto exit;
1582*4882a593Smuzhiyun emit_bswap_uxt:
1583*4882a593Smuzhiyun 		switch (imm) {
1584*4882a593Smuzhiyun 		case 16:
1585*4882a593Smuzhiyun 			/* zero-extend 16 bits into 64 bits */
1586*4882a593Smuzhiyun #if __LINUX_ARM_ARCH__ < 6
1587*4882a593Smuzhiyun 			emit_a32_mov_i(tmp2[1], 0xffff, ctx);
1588*4882a593Smuzhiyun 			emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx);
1589*4882a593Smuzhiyun #else /* ARMv6+ */
1590*4882a593Smuzhiyun 			emit(ARM_UXTH(rd[1], rd[1]), ctx);
1591*4882a593Smuzhiyun #endif
1592*4882a593Smuzhiyun 			if (!ctx->prog->aux->verifier_zext)
1593*4882a593Smuzhiyun 				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1594*4882a593Smuzhiyun 			break;
1595*4882a593Smuzhiyun 		case 32:
1596*4882a593Smuzhiyun 			/* zero-extend 32 bits into 64 bits */
1597*4882a593Smuzhiyun 			if (!ctx->prog->aux->verifier_zext)
1598*4882a593Smuzhiyun 				emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx);
1599*4882a593Smuzhiyun 			break;
1600*4882a593Smuzhiyun 		case 64:
1601*4882a593Smuzhiyun 			/* nop */
1602*4882a593Smuzhiyun 			break;
1603*4882a593Smuzhiyun 		}
1604*4882a593Smuzhiyun exit:
1605*4882a593Smuzhiyun 		arm_bpf_put_reg64(dst, rd, ctx);
1606*4882a593Smuzhiyun 		break;
1607*4882a593Smuzhiyun 	/* dst = imm64 */
1608*4882a593Smuzhiyun 	case BPF_LD | BPF_IMM | BPF_DW:
1609*4882a593Smuzhiyun 	{
1610*4882a593Smuzhiyun 		u64 val = (u32)imm | (u64)insn[1].imm << 32;
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 		emit_a32_mov_i64(dst, val, ctx);
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 		return 1;
1615*4882a593Smuzhiyun 	}
1616*4882a593Smuzhiyun 	/* LDX: dst = *(size *)(src + off) */
1617*4882a593Smuzhiyun 	case BPF_LDX | BPF_MEM | BPF_W:
1618*4882a593Smuzhiyun 	case BPF_LDX | BPF_MEM | BPF_H:
1619*4882a593Smuzhiyun 	case BPF_LDX | BPF_MEM | BPF_B:
1620*4882a593Smuzhiyun 	case BPF_LDX | BPF_MEM | BPF_DW:
1621*4882a593Smuzhiyun 		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1622*4882a593Smuzhiyun 		emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code));
1623*4882a593Smuzhiyun 		break;
1624*4882a593Smuzhiyun 	/* speculation barrier */
1625*4882a593Smuzhiyun 	case BPF_ST | BPF_NOSPEC:
1626*4882a593Smuzhiyun 		break;
1627*4882a593Smuzhiyun 	/* ST: *(size *)(dst + off) = imm */
1628*4882a593Smuzhiyun 	case BPF_ST | BPF_MEM | BPF_W:
1629*4882a593Smuzhiyun 	case BPF_ST | BPF_MEM | BPF_H:
1630*4882a593Smuzhiyun 	case BPF_ST | BPF_MEM | BPF_B:
1631*4882a593Smuzhiyun 	case BPF_ST | BPF_MEM | BPF_DW:
1632*4882a593Smuzhiyun 		switch (BPF_SIZE(code)) {
1633*4882a593Smuzhiyun 		case BPF_DW:
1634*4882a593Smuzhiyun 			/* Sign-extend immediate value into temp reg */
1635*4882a593Smuzhiyun 			emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1636*4882a593Smuzhiyun 			break;
1637*4882a593Smuzhiyun 		case BPF_W:
1638*4882a593Smuzhiyun 		case BPF_H:
1639*4882a593Smuzhiyun 		case BPF_B:
1640*4882a593Smuzhiyun 			emit_a32_mov_i(tmp2[1], imm, ctx);
1641*4882a593Smuzhiyun 			break;
1642*4882a593Smuzhiyun 		}
1643*4882a593Smuzhiyun 		emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code));
1644*4882a593Smuzhiyun 		break;
1645*4882a593Smuzhiyun 	/* STX XADD: lock *(u32 *)(dst + off) += src */
1646*4882a593Smuzhiyun 	case BPF_STX | BPF_XADD | BPF_W:
1647*4882a593Smuzhiyun 	/* STX XADD: lock *(u64 *)(dst + off) += src */
1648*4882a593Smuzhiyun 	case BPF_STX | BPF_XADD | BPF_DW:
1649*4882a593Smuzhiyun 		goto notyet;
1650*4882a593Smuzhiyun 	/* STX: *(size *)(dst + off) = src */
1651*4882a593Smuzhiyun 	case BPF_STX | BPF_MEM | BPF_W:
1652*4882a593Smuzhiyun 	case BPF_STX | BPF_MEM | BPF_H:
1653*4882a593Smuzhiyun 	case BPF_STX | BPF_MEM | BPF_B:
1654*4882a593Smuzhiyun 	case BPF_STX | BPF_MEM | BPF_DW:
1655*4882a593Smuzhiyun 		rs = arm_bpf_get_reg64(src, tmp2, ctx);
1656*4882a593Smuzhiyun 		emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code));
1657*4882a593Smuzhiyun 		break;
1658*4882a593Smuzhiyun 	/* PC += off if dst == src */
1659*4882a593Smuzhiyun 	/* PC += off if dst > src */
1660*4882a593Smuzhiyun 	/* PC += off if dst >= src */
1661*4882a593Smuzhiyun 	/* PC += off if dst < src */
1662*4882a593Smuzhiyun 	/* PC += off if dst <= src */
1663*4882a593Smuzhiyun 	/* PC += off if dst != src */
1664*4882a593Smuzhiyun 	/* PC += off if dst > src (signed) */
1665*4882a593Smuzhiyun 	/* PC += off if dst >= src (signed) */
1666*4882a593Smuzhiyun 	/* PC += off if dst < src (signed) */
1667*4882a593Smuzhiyun 	/* PC += off if dst <= src (signed) */
1668*4882a593Smuzhiyun 	/* PC += off if dst & src */
1669*4882a593Smuzhiyun 	case BPF_JMP | BPF_JEQ | BPF_X:
1670*4882a593Smuzhiyun 	case BPF_JMP | BPF_JGT | BPF_X:
1671*4882a593Smuzhiyun 	case BPF_JMP | BPF_JGE | BPF_X:
1672*4882a593Smuzhiyun 	case BPF_JMP | BPF_JNE | BPF_X:
1673*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSGT | BPF_X:
1674*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSGE | BPF_X:
1675*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSET | BPF_X:
1676*4882a593Smuzhiyun 	case BPF_JMP | BPF_JLE | BPF_X:
1677*4882a593Smuzhiyun 	case BPF_JMP | BPF_JLT | BPF_X:
1678*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSLT | BPF_X:
1679*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSLE | BPF_X:
1680*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JEQ | BPF_X:
1681*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JGT | BPF_X:
1682*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JGE | BPF_X:
1683*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JNE | BPF_X:
1684*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSGT | BPF_X:
1685*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSGE | BPF_X:
1686*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSET | BPF_X:
1687*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JLE | BPF_X:
1688*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JLT | BPF_X:
1689*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSLT | BPF_X:
1690*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSLE | BPF_X:
1691*4882a593Smuzhiyun 		/* Setup source registers */
1692*4882a593Smuzhiyun 		rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
1693*4882a593Smuzhiyun 		rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
1694*4882a593Smuzhiyun 		goto go_jmp;
1695*4882a593Smuzhiyun 	/* PC += off if dst == imm */
1696*4882a593Smuzhiyun 	/* PC += off if dst > imm */
1697*4882a593Smuzhiyun 	/* PC += off if dst >= imm */
1698*4882a593Smuzhiyun 	/* PC += off if dst < imm */
1699*4882a593Smuzhiyun 	/* PC += off if dst <= imm */
1700*4882a593Smuzhiyun 	/* PC += off if dst != imm */
1701*4882a593Smuzhiyun 	/* PC += off if dst > imm (signed) */
1702*4882a593Smuzhiyun 	/* PC += off if dst >= imm (signed) */
1703*4882a593Smuzhiyun 	/* PC += off if dst < imm (signed) */
1704*4882a593Smuzhiyun 	/* PC += off if dst <= imm (signed) */
1705*4882a593Smuzhiyun 	/* PC += off if dst & imm */
1706*4882a593Smuzhiyun 	case BPF_JMP | BPF_JEQ | BPF_K:
1707*4882a593Smuzhiyun 	case BPF_JMP | BPF_JGT | BPF_K:
1708*4882a593Smuzhiyun 	case BPF_JMP | BPF_JGE | BPF_K:
1709*4882a593Smuzhiyun 	case BPF_JMP | BPF_JNE | BPF_K:
1710*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSGT | BPF_K:
1711*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSGE | BPF_K:
1712*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSET | BPF_K:
1713*4882a593Smuzhiyun 	case BPF_JMP | BPF_JLT | BPF_K:
1714*4882a593Smuzhiyun 	case BPF_JMP | BPF_JLE | BPF_K:
1715*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSLT | BPF_K:
1716*4882a593Smuzhiyun 	case BPF_JMP | BPF_JSLE | BPF_K:
1717*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JEQ | BPF_K:
1718*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JGT | BPF_K:
1719*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JGE | BPF_K:
1720*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JNE | BPF_K:
1721*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSGT | BPF_K:
1722*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSGE | BPF_K:
1723*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSET | BPF_K:
1724*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JLT | BPF_K:
1725*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JLE | BPF_K:
1726*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSLT | BPF_K:
1727*4882a593Smuzhiyun 	case BPF_JMP32 | BPF_JSLE | BPF_K:
1728*4882a593Smuzhiyun 		if (off == 0)
1729*4882a593Smuzhiyun 			break;
1730*4882a593Smuzhiyun 		rm = tmp2[0];
1731*4882a593Smuzhiyun 		rn = tmp2[1];
1732*4882a593Smuzhiyun 		/* Sign-extend immediate value */
1733*4882a593Smuzhiyun 		emit_a32_mov_se_i64(true, tmp2, imm, ctx);
1734*4882a593Smuzhiyun go_jmp:
1735*4882a593Smuzhiyun 		/* Setup destination register */
1736*4882a593Smuzhiyun 		rd = arm_bpf_get_reg64(dst, tmp, ctx);
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 		/* Check for the condition */
1739*4882a593Smuzhiyun 		emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code),
1740*4882a593Smuzhiyun 			  BPF_CLASS(code) == BPF_JMP);
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 		/* Setup JUMP instruction */
1743*4882a593Smuzhiyun 		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1744*4882a593Smuzhiyun 		switch (BPF_OP(code)) {
1745*4882a593Smuzhiyun 		case BPF_JNE:
1746*4882a593Smuzhiyun 		case BPF_JSET:
1747*4882a593Smuzhiyun 			_emit(ARM_COND_NE, ARM_B(jmp_offset), ctx);
1748*4882a593Smuzhiyun 			break;
1749*4882a593Smuzhiyun 		case BPF_JEQ:
1750*4882a593Smuzhiyun 			_emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
1751*4882a593Smuzhiyun 			break;
1752*4882a593Smuzhiyun 		case BPF_JGT:
1753*4882a593Smuzhiyun 			_emit(ARM_COND_HI, ARM_B(jmp_offset), ctx);
1754*4882a593Smuzhiyun 			break;
1755*4882a593Smuzhiyun 		case BPF_JGE:
1756*4882a593Smuzhiyun 			_emit(ARM_COND_CS, ARM_B(jmp_offset), ctx);
1757*4882a593Smuzhiyun 			break;
1758*4882a593Smuzhiyun 		case BPF_JSGT:
1759*4882a593Smuzhiyun 			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1760*4882a593Smuzhiyun 			break;
1761*4882a593Smuzhiyun 		case BPF_JSGE:
1762*4882a593Smuzhiyun 			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1763*4882a593Smuzhiyun 			break;
1764*4882a593Smuzhiyun 		case BPF_JLE:
1765*4882a593Smuzhiyun 			_emit(ARM_COND_LS, ARM_B(jmp_offset), ctx);
1766*4882a593Smuzhiyun 			break;
1767*4882a593Smuzhiyun 		case BPF_JLT:
1768*4882a593Smuzhiyun 			_emit(ARM_COND_CC, ARM_B(jmp_offset), ctx);
1769*4882a593Smuzhiyun 			break;
1770*4882a593Smuzhiyun 		case BPF_JSLT:
1771*4882a593Smuzhiyun 			_emit(ARM_COND_LT, ARM_B(jmp_offset), ctx);
1772*4882a593Smuzhiyun 			break;
1773*4882a593Smuzhiyun 		case BPF_JSLE:
1774*4882a593Smuzhiyun 			_emit(ARM_COND_GE, ARM_B(jmp_offset), ctx);
1775*4882a593Smuzhiyun 			break;
1776*4882a593Smuzhiyun 		}
1777*4882a593Smuzhiyun 		break;
1778*4882a593Smuzhiyun 	/* JMP OFF */
1779*4882a593Smuzhiyun 	case BPF_JMP | BPF_JA:
1780*4882a593Smuzhiyun 	{
1781*4882a593Smuzhiyun 		if (off == 0)
1782*4882a593Smuzhiyun 			break;
1783*4882a593Smuzhiyun 		jmp_offset = bpf2a32_offset(i+off, i, ctx);
1784*4882a593Smuzhiyun 		check_imm24(jmp_offset);
1785*4882a593Smuzhiyun 		emit(ARM_B(jmp_offset), ctx);
1786*4882a593Smuzhiyun 		break;
1787*4882a593Smuzhiyun 	}
1788*4882a593Smuzhiyun 	/* tail call */
1789*4882a593Smuzhiyun 	case BPF_JMP | BPF_TAIL_CALL:
1790*4882a593Smuzhiyun 		if (emit_bpf_tail_call(ctx))
1791*4882a593Smuzhiyun 			return -EFAULT;
1792*4882a593Smuzhiyun 		break;
1793*4882a593Smuzhiyun 	/* function call */
1794*4882a593Smuzhiyun 	case BPF_JMP | BPF_CALL:
1795*4882a593Smuzhiyun 	{
1796*4882a593Smuzhiyun 		const s8 *r0 = bpf2a32[BPF_REG_0];
1797*4882a593Smuzhiyun 		const s8 *r1 = bpf2a32[BPF_REG_1];
1798*4882a593Smuzhiyun 		const s8 *r2 = bpf2a32[BPF_REG_2];
1799*4882a593Smuzhiyun 		const s8 *r3 = bpf2a32[BPF_REG_3];
1800*4882a593Smuzhiyun 		const s8 *r4 = bpf2a32[BPF_REG_4];
1801*4882a593Smuzhiyun 		const s8 *r5 = bpf2a32[BPF_REG_5];
1802*4882a593Smuzhiyun 		const u32 func = (u32)__bpf_call_base + (u32)imm;
1803*4882a593Smuzhiyun 
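		/*
		 * Per the AAPCS, the first two 64-bit BPF arguments travel
		 * in r0:r1 and r2:r3; arguments 3-5 are pushed in reverse
		 * order so they form the on-stack part of the call frame,
		 * with R3 at the lowest address.
		 */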
1804*4882a593Smuzhiyun 		emit_a32_mov_r64(true, r0, r1, ctx);
1805*4882a593Smuzhiyun 		emit_a32_mov_r64(true, r1, r2, ctx);
1806*4882a593Smuzhiyun 		emit_push_r64(r5, ctx);
1807*4882a593Smuzhiyun 		emit_push_r64(r4, ctx);
1808*4882a593Smuzhiyun 		emit_push_r64(r3, ctx);
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 		emit_a32_mov_i(tmp[1], func, ctx);
1811*4882a593Smuzhiyun 		emit_blx_r(tmp[1], ctx);
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 		emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // drop the three stacked arg pairs (24 bytes)
1814*4882a593Smuzhiyun 		break;
1815*4882a593Smuzhiyun 	}
1816*4882a593Smuzhiyun 	/* function return */
1817*4882a593Smuzhiyun 	case BPF_JMP | BPF_EXIT:
1818*4882a593Smuzhiyun 		/* Optimization: when the last instruction is EXIT,
1819*4882a593Smuzhiyun 		 * simply fall through to the epilogue.
1820*4882a593Smuzhiyun 		 */
1821*4882a593Smuzhiyun 		if (i == ctx->prog->len - 1)
1822*4882a593Smuzhiyun 			break;
1823*4882a593Smuzhiyun 		jmp_offset = epilogue_offset(ctx);
1824*4882a593Smuzhiyun 		check_imm24(jmp_offset);
1825*4882a593Smuzhiyun 		emit(ARM_B(jmp_offset), ctx);
1826*4882a593Smuzhiyun 		break;
1827*4882a593Smuzhiyun notyet:
1828*4882a593Smuzhiyun 		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
1829*4882a593Smuzhiyun 		return -EFAULT;
1830*4882a593Smuzhiyun 	default:
1831*4882a593Smuzhiyun 		pr_err_once("unknown opcode %02x\n", code);
1832*4882a593Smuzhiyun 		return -EINVAL;
1833*4882a593Smuzhiyun 	}
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 	if (ctx->flags & FLAG_IMM_OVERFLOW)
1836*4882a593Smuzhiyun 		/*
1837*4882a593Smuzhiyun 		 * this instruction generated an overflow when
1838*4882a593Smuzhiyun 		 * trying to access the literal pool, so
1839*4882a593Smuzhiyun 		 * delegate this filter to the kernel interpreter.
1840*4882a593Smuzhiyun 		 */
1841*4882a593Smuzhiyun 		return -1;
1842*4882a593Smuzhiyun 	return 0;
1843*4882a593Smuzhiyun }
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun static int build_body(struct jit_ctx *ctx)
1846*4882a593Smuzhiyun {
1847*4882a593Smuzhiyun 	const struct bpf_prog *prog = ctx->prog;
1848*4882a593Smuzhiyun 	unsigned int i;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	for (i = 0; i < prog->len; i++) {
1851*4882a593Smuzhiyun 		const struct bpf_insn *insn = &(prog->insnsi[i]);
1852*4882a593Smuzhiyun 		int ret;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 		ret = build_insn(insn, ctx);
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 		/* A positive return means a 64-bit immediate was loaded, which spans two BPF instructions. */
1857*4882a593Smuzhiyun 		if (ret > 0) {
1858*4882a593Smuzhiyun 			i++;
1859*4882a593Smuzhiyun 			if (ctx->target == NULL)
1860*4882a593Smuzhiyun 				ctx->offsets[i] = ctx->idx;
1861*4882a593Smuzhiyun 			continue;
1862*4882a593Smuzhiyun 		}
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 		if (ctx->target == NULL)
1865*4882a593Smuzhiyun 			ctx->offsets[i] = ctx->idx;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 		/* If unsuccessful, return with the error code. */
1868*4882a593Smuzhiyun 		if (ret)
1869*4882a593Smuzhiyun 			return ret;
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun 	return 0;
1872*4882a593Smuzhiyun }
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun static int validate_code(struct jit_ctx *ctx)
1875*4882a593Smuzhiyun {
1876*4882a593Smuzhiyun 	int i;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	for (i = 0; i < ctx->idx; i++) {
1879*4882a593Smuzhiyun 		if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF))
1880*4882a593Smuzhiyun 			return -1;
1881*4882a593Smuzhiyun 	}
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	return 0;
1884*4882a593Smuzhiyun }
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun void bpf_jit_compile(struct bpf_prog *prog)
1887*4882a593Smuzhiyun {
1888*4882a593Smuzhiyun 	/* Nothing to do here. We support Internal BPF. */
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun bool bpf_jit_needs_zext(void)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun 	return true;
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	struct bpf_prog *tmp, *orig_prog = prog;
1899*4882a593Smuzhiyun 	struct bpf_binary_header *header;
1900*4882a593Smuzhiyun 	bool tmp_blinded = false;
1901*4882a593Smuzhiyun 	struct jit_ctx ctx;
1902*4882a593Smuzhiyun 	unsigned int tmp_idx;
1903*4882a593Smuzhiyun 	unsigned int image_size;
1904*4882a593Smuzhiyun 	u8 *image_ptr;
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun 	/* If BPF JIT was not enabled then we must fall back to
1907*4882a593Smuzhiyun 	 * the interpreter.
1908*4882a593Smuzhiyun 	 */
1909*4882a593Smuzhiyun 	if (!prog->jit_requested)
1910*4882a593Smuzhiyun 		return orig_prog;
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	/* If constant blinding was enabled and we failed during blinding
1913*4882a593Smuzhiyun 	 * then we must fall back to the interpreter. Otherwise, we save
1914*4882a593Smuzhiyun 	 * the new JITed code.
1915*4882a593Smuzhiyun 	 */
1916*4882a593Smuzhiyun 	tmp = bpf_jit_blind_constants(prog);
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	if (IS_ERR(tmp))
1919*4882a593Smuzhiyun 		return orig_prog;
1920*4882a593Smuzhiyun 	if (tmp != prog) {
1921*4882a593Smuzhiyun 		tmp_blinded = true;
1922*4882a593Smuzhiyun 		prog = tmp;
1923*4882a593Smuzhiyun 	}
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	memset(&ctx, 0, sizeof(ctx));
1926*4882a593Smuzhiyun 	ctx.prog = prog;
1927*4882a593Smuzhiyun 	ctx.cpu_architecture = cpu_architecture();
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	/* If we are not able to allocate memory for offsets[],
1930*4882a593Smuzhiyun 	 * we must fall back to the interpreter.
1931*4882a593Smuzhiyun 	 */
1932*4882a593Smuzhiyun 	ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
1933*4882a593Smuzhiyun 	if (ctx.offsets == NULL) {
1934*4882a593Smuzhiyun 		prog = orig_prog;
1935*4882a593Smuzhiyun 		goto out;
1936*4882a593Smuzhiyun 	}
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	/* 1) fake pass to find the length of the JITed code,
1939*4882a593Smuzhiyun 	 * to compute ctx->offsets and the other context variables
1940*4882a593Smuzhiyun 	 * needed to compute the final JITed code.
1941*4882a593Smuzhiyun 	 * Also, calculate the random start of the JITed code,
1942*4882a593Smuzhiyun 	 * which is prefixed by a random number of fault instructions.
1943*4882a593Smuzhiyun 	 *
1944*4882a593Smuzhiyun 	 * If the first pass fails then there is no chance of it
1945*4882a593Smuzhiyun 	 * being successful in the second pass, so just fall back
1946*4882a593Smuzhiyun 	 * to the interpreter.
1947*4882a593Smuzhiyun 	 */
1948*4882a593Smuzhiyun 	if (build_body(&ctx)) {
1949*4882a593Smuzhiyun 		prog = orig_prog;
1950*4882a593Smuzhiyun 		goto out_off;
1951*4882a593Smuzhiyun 	}
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	tmp_idx = ctx.idx;
1954*4882a593Smuzhiyun 	build_prologue(&ctx);
1955*4882a593Smuzhiyun 	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
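	/*
	 * The prologue size is needed by emit_bpf_tail_call(): a tail call
	 * branches to prog->bpf_func + prologue_bytes, skipping the target
	 * program's prologue, so the prologue must have a fixed length.
	 */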
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	ctx.epilogue_offset = ctx.idx;
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun #if __LINUX_ARM_ARCH__ < 7
1960*4882a593Smuzhiyun 	tmp_idx = ctx.idx;
1961*4882a593Smuzhiyun 	build_epilogue(&ctx);
1962*4882a593Smuzhiyun 	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 	ctx.idx += ctx.imm_count;
1965*4882a593Smuzhiyun 	if (ctx.imm_count) {
1966*4882a593Smuzhiyun 		ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL);
1967*4882a593Smuzhiyun 		if (ctx.imms == NULL) {
1968*4882a593Smuzhiyun 			prog = orig_prog;
1969*4882a593Smuzhiyun 			goto out_off;
1970*4882a593Smuzhiyun 		}
1971*4882a593Smuzhiyun 	}
1972*4882a593Smuzhiyun #else
1973*4882a593Smuzhiyun 	/* there's nothing special to do for the epilogue on ARMv7 */
1974*4882a593Smuzhiyun 	build_epilogue(&ctx);
1975*4882a593Smuzhiyun #endif
1976*4882a593Smuzhiyun 	/* Now we can get the actual image size of the JITed ARM code.
1977*4882a593Smuzhiyun 	 * Currently, we are not considering the THUMB-2 instructions
1978*4882a593Smuzhiyun 	 * for the JIT, although that could decrease the size of
1979*4882a593Smuzhiyun 	 * the image.
1980*4882a593Smuzhiyun 	 *
1981*4882a593Smuzhiyun 	 * As each ARM instruction is 32 bits long, we translate the
1982*4882a593Smuzhiyun 	 * number of JITed instructions into the size required to
1983*4882a593Smuzhiyun 	 * store the JITed code.
1984*4882a593Smuzhiyun 	image_size = sizeof(u32) * ctx.idx;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	/* Now we know the size of the image to allocate */
1987*4882a593Smuzhiyun 	header = bpf_jit_binary_alloc(image_size, &image_ptr,
1988*4882a593Smuzhiyun 				      sizeof(u32), jit_fill_hole);
1989*4882a593Smuzhiyun 	/* If we are not able to allocate memory for the image,
1990*4882a593Smuzhiyun 	 * we must fall back to the interpreter.
1991*4882a593Smuzhiyun 	 */
1992*4882a593Smuzhiyun 	if (header == NULL) {
1993*4882a593Smuzhiyun 		prog = orig_prog;
1994*4882a593Smuzhiyun 		goto out_imms;
1995*4882a593Smuzhiyun 	}
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun 	/* 2.) Actual pass to generate final JIT code */
1998*4882a593Smuzhiyun 	ctx.target = (u32 *) image_ptr;
1999*4882a593Smuzhiyun 	ctx.idx = 0;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	build_prologue(&ctx);
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun 	/* If building the body of the JITed code fails somehow,
2004*4882a593Smuzhiyun 	 * we fall back to the interpreter.
2005*4882a593Smuzhiyun 	 */
2006*4882a593Smuzhiyun 	if (build_body(&ctx) < 0) {
2007*4882a593Smuzhiyun 		image_ptr = NULL;
2008*4882a593Smuzhiyun 		bpf_jit_binary_free(header);
2009*4882a593Smuzhiyun 		prog = orig_prog;
2010*4882a593Smuzhiyun 		goto out_imms;
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 	build_epilogue(&ctx);
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	/* 3.) Extra pass to validate JITed Code */
2015*4882a593Smuzhiyun 	if (validate_code(&ctx)) {
2016*4882a593Smuzhiyun 		image_ptr = NULL;
2017*4882a593Smuzhiyun 		bpf_jit_binary_free(header);
2018*4882a593Smuzhiyun 		prog = orig_prog;
2019*4882a593Smuzhiyun 		goto out_imms;
2020*4882a593Smuzhiyun 	}
2021*4882a593Smuzhiyun 	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 	if (bpf_jit_enable > 1)
2024*4882a593Smuzhiyun 		/* there are 2 passes here */
2025*4882a593Smuzhiyun 		bpf_jit_dump(prog->len, image_size, 2, ctx.target);
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	bpf_jit_binary_lock_ro(header);
2028*4882a593Smuzhiyun 	prog->bpf_func = (void *)ctx.target;
2029*4882a593Smuzhiyun 	prog->jited = 1;
2030*4882a593Smuzhiyun 	prog->jited_len = image_size;
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun out_imms:
2033*4882a593Smuzhiyun #if __LINUX_ARM_ARCH__ < 7
2034*4882a593Smuzhiyun 	if (ctx.imm_count)
2035*4882a593Smuzhiyun 		kfree(ctx.imms);
2036*4882a593Smuzhiyun #endif
2037*4882a593Smuzhiyun out_off:
2038*4882a593Smuzhiyun 	kfree(ctx.offsets);
2039*4882a593Smuzhiyun out:
2040*4882a593Smuzhiyun 	if (tmp_blinded)
2041*4882a593Smuzhiyun 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
2042*4882a593Smuzhiyun 					   tmp : orig_prog);
2043*4882a593Smuzhiyun 	return prog;
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun 
2046