// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pkt_cls.h>

#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"

#define pr_vlog(env, fmt, ...) \
        bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)

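/* The instruction metadata is kept on a doubly-linked list, ordered by
 * instruction index. Seek to insn_idx from whichever known position is
 * cheapest: the current meta, the head, or the tail of the list.
 */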
struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  unsigned int insn_idx)
{
        unsigned int forward, backward, i;

        backward = meta->n - insn_idx;
        forward = insn_idx - meta->n;

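        /* forward and backward are unsigned, so the distance in the wrong
         * direction wraps around to a huge value and drops out of min().
         * Start from the list tail or head instead when either is closer
         * than the current position.
         */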
        if (min(forward, backward) > nfp_prog->n_insns - insn_idx - 1) {
                backward = nfp_prog->n_insns - insn_idx - 1;
                meta = nfp_prog_last_meta(nfp_prog);
        }
        if (min(forward, backward) > insn_idx && backward > insn_idx) {
                forward = insn_idx;
                meta = nfp_prog_first_meta(nfp_prog);
        }

        if (forward < backward)
                for (i = 0; i < forward; i++)
                        meta = nfp_meta_next(meta);
        else
                for (i = 0; i < backward; i++)
                        meta = nfp_meta_prev(meta);

        return meta;
}

static void
nfp_record_adjust_head(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
                       struct nfp_insn_meta *meta,
                       const struct bpf_reg_state *reg2)
{
        unsigned int location = UINT_MAX;
        int imm;

        /* The datapath can usually give us guarantees on how much adjust
         * head can be done without the need for any checks. Optimize the
         * simple case where there is only one adjust head by a constant.
         */
        if (reg2->type != SCALAR_VALUE || !tnum_is_const(reg2->var_off))
                goto exit_set_location;
        imm = reg2->var_off.value;
        /* The translator will skip all checks, so we need to guarantee the
         * minimum packet length.
         */
        if (imm > ETH_ZLEN - ETH_HLEN)
                goto exit_set_location;
        if (imm > (int)bpf->adjust_head.guaranteed_add ||
            imm < -bpf->adjust_head.guaranteed_sub)
                goto exit_set_location;

        if (nfp_prog->adjust_head_location) {
                /* Only one call per program allowed */
                if (nfp_prog->adjust_head_location != meta->n)
                        goto exit_set_location;

                if (meta->arg2.reg.var_off.value != imm)
                        goto exit_set_location;
        }

        location = meta->n;
exit_set_location:
        nfp_prog->adjust_head_location = location;
}

static bool nfp_bpf_map_update_value_ok(struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
        struct bpf_offloaded_map *offmap;
        struct bpf_func_state *state;
        struct nfp_bpf_map *nfp_map;
        int off, i;

        state = env->cur_state->frame[reg3->frameno];

        /* We need to record each time an update happens with non-zero
         * words, in case such a word is used in atomic operations.
         * Implicitly depend on nfp_bpf_stack_arg_ok(reg3) being run before.
         */

        offmap = map_to_offmap(reg1->map_ptr);
        nfp_map = offmap->dev_priv;
        off = reg3->off + reg3->var_off.value;

        for (i = 0; i < offmap->map.value_size; i++) {
                struct bpf_stack_state *stack_entry;
                unsigned int soff;

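                /* The stack offset off is negative; convert the byte at
                 * off + i into a non-negative index into the verifier's
                 * stack slot array.
                 */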
                soff = -(off + i) - 1;
                stack_entry = &state->stack[soff / BPF_REG_SIZE];
                if (stack_entry->slot_type[soff % BPF_REG_SIZE] == STACK_ZERO)
                        continue;

                if (nfp_map->use_map[i / 4].type == NFP_MAP_USE_ATOMIC_CNT) {
                        pr_vlog(env, "value at offset %d/%d may be non-zero, bpf_map_update_elem() is required to initialize atomic counters to zero to avoid offload endian issues\n",
                                i, soff);
                        return false;
                }
                nfp_map->use_map[i / 4].non_zero_update = 1;
        }

        return true;
}

static bool
nfp_bpf_stack_arg_ok(const char *fname, struct bpf_verifier_env *env,
                     const struct bpf_reg_state *reg,
                     struct nfp_bpf_reg_state *old_arg)
{
        s64 off, old_off;

        if (reg->type != PTR_TO_STACK) {
                pr_vlog(env, "%s: unsupported ptr type %d\n",
                        fname, reg->type);
                return false;
        }
        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "%s: variable pointer\n", fname);
                return false;
        }

        off = reg->var_off.value + reg->off;
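        /* Stack arguments are handled in 4B words, so the pointer must be
         * word-aligned.
         */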
        if (-off % 4) {
                pr_vlog(env, "%s: unaligned stack pointer %lld\n", fname, -off);
                return false;
        }

        /* The rest of the checks only apply if we re-parse the same insn */
        if (!old_arg)
                return true;

        old_off = old_arg->reg.var_off.value + old_arg->reg.off;
        old_arg->var_off |= off != old_off;

        return true;
}

static bool
nfp_bpf_map_call_ok(const char *fname, struct bpf_verifier_env *env,
                    struct nfp_insn_meta *meta,
                    u32 helper_tgt, const struct bpf_reg_state *reg1)
{
        if (!helper_tgt) {
                pr_vlog(env, "%s: not supported by FW\n", fname);
                return false;
        }

        return true;
}

static int
nfp_bpf_check_helper_call(struct nfp_prog *nfp_prog,
                          struct bpf_verifier_env *env,
                          struct nfp_insn_meta *meta)
{
        const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
        const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
        const struct bpf_reg_state *reg3 = cur_regs(env) + BPF_REG_3;
        struct nfp_app_bpf *bpf = nfp_prog->bpf;
        u32 func_id = meta->insn.imm;

        switch (func_id) {
        case BPF_FUNC_xdp_adjust_head:
                if (!bpf->adjust_head.off_max) {
                        pr_vlog(env, "adjust_head not supported by FW\n");
                        return -EOPNOTSUPP;
                }
                if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
                        pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
                        return -EOPNOTSUPP;
                }

                nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
                break;

        case BPF_FUNC_xdp_adjust_tail:
                if (!bpf->adjust_tail) {
                        pr_vlog(env, "adjust_tail not supported by FW\n");
                        return -EOPNOTSUPP;
                }
                break;

        case BPF_FUNC_map_lookup_elem:
                if (!nfp_bpf_map_call_ok("map_lookup", env, meta,
                                         bpf->helpers.map_lookup, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_lookup", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_map_update_elem:
                if (!nfp_bpf_map_call_ok("map_update", env, meta,
                                         bpf->helpers.map_update, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_update", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL) ||
                    !nfp_bpf_stack_arg_ok("map_update", env, reg3, NULL) ||
                    !nfp_bpf_map_update_value_ok(env))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_map_delete_elem:
                if (!nfp_bpf_map_call_ok("map_delete", env, meta,
                                         bpf->helpers.map_delete, reg1) ||
                    !nfp_bpf_stack_arg_ok("map_delete", env, reg2,
                                          meta->func_id ? &meta->arg2 : NULL))
                        return -EOPNOTSUPP;
                break;

        case BPF_FUNC_get_prandom_u32:
                if (bpf->pseudo_random)
                        break;
                pr_vlog(env, "bpf_get_prandom_u32(): FW doesn't support random number generation\n");
                return -EOPNOTSUPP;

        case BPF_FUNC_perf_event_output:
                BUILD_BUG_ON(NFP_BPF_SCALAR_VALUE != SCALAR_VALUE ||
                             NFP_BPF_MAP_VALUE != PTR_TO_MAP_VALUE ||
                             NFP_BPF_STACK != PTR_TO_STACK ||
                             NFP_BPF_PACKET_DATA != PTR_TO_PACKET);

                if (!bpf->helpers.perf_event_output) {
                        pr_vlog(env, "event_output: not supported by FW\n");
                        return -EOPNOTSUPP;
                }

                /* Force current CPU to make sure we can report the event
                 * wherever we get the control message from the FW.
                 */
                if (reg3->var_off.mask & BPF_F_INDEX_MASK ||
                    (reg3->var_off.value & BPF_F_INDEX_MASK) !=
                    BPF_F_CURRENT_CPU) {
                        char tn_buf[48];

                        tnum_strn(tn_buf, sizeof(tn_buf), reg3->var_off);
                        pr_vlog(env, "event_output: must use BPF_F_CURRENT_CPU, var_off: %s\n",
                                tn_buf);
                        return -EOPNOTSUPP;
                }

                /* Save space in meta: we don't care about arguments other
                 * than the 4th one, so shove it into arg1.
                 */
                reg1 = cur_regs(env) + BPF_REG_4;

                if (reg1->type != SCALAR_VALUE /* NULL ptr */ &&
                    reg1->type != PTR_TO_STACK &&
                    reg1->type != PTR_TO_MAP_VALUE &&
                    reg1->type != PTR_TO_PACKET) {
                        pr_vlog(env, "event_output: unsupported ptr type: %d\n",
                                reg1->type);
                        return -EOPNOTSUPP;
                }

                if (reg1->type == PTR_TO_STACK &&
                    !nfp_bpf_stack_arg_ok("event_output", env, reg1, NULL))
                        return -EOPNOTSUPP;

                /* Warn the user that on offload NFP may return success even
                 * if the map is not going to accept the event, since event
                 * output is fully asynchronous and the device won't know the
                 * state of the map. There is also a FW limitation on the
                 * event length.
                 *
                 * Lost events will not show up on the perf ring, the driver
                 * won't see them at all. Events may also get reordered.
                 */
                dev_warn_once(&nfp_prog->bpf->app->pf->pdev->dev,
                              "bpf: note: return codes and behavior of bpf_event_output() helper differs for offloaded programs!\n");
                pr_vlog(env, "warning: return codes and behavior of event_output helper differ for offload!\n");

                if (!meta->func_id)
                        break;

                if (reg1->type != meta->arg1.type) {
                        pr_vlog(env, "event_output: ptr type changed: %d %d\n",
                                meta->arg1.type, reg1->type);
                        return -EINVAL;
                }
                break;

        default:
                pr_vlog(env, "unsupported function id: %d\n", func_id);
                return -EOPNOTSUPP;
        }

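        /* Remember the helper and its arguments so that re-parses of the
         * same instruction can be checked for consistency above.
         */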
        meta->func_id = func_id;
        meta->arg1 = *reg1;
        meta->arg2.reg = *reg2;

        return 0;
}

static int
nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
                   struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg0 = cur_regs(env) + BPF_REG_0;
        u64 imm;

        if (nfp_prog->type == BPF_PROG_TYPE_XDP)
                return 0;

        if (!(reg0->type == SCALAR_VALUE && tnum_is_const(reg0->var_off))) {
                char tn_buf[48];

                tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
                pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
                        reg0->type, tn_buf);
                return -EINVAL;
        }

        imm = reg0->var_off.value;
        if (nfp_prog->type == BPF_PROG_TYPE_SCHED_CLS &&
            imm <= TC_ACT_REDIRECT &&
            imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
            imm != TC_ACT_QUEUED) {
                pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
                        reg0->type, imm);
                return -EINVAL;
        }

        return 0;
}

static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
                           struct nfp_insn_meta *meta,
                           const struct bpf_reg_state *reg,
                           struct bpf_verifier_env *env)
{
        s32 old_off, new_off;

        if (reg->frameno != env->cur_state->curframe)
                meta->flags |= FLAG_INSN_PTR_CALLER_STACK_FRAME;

        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "variable ptr stack access\n");
                return -EINVAL;
        }

        if (meta->ptr.type == NOT_INIT)
                return 0;

        old_off = meta->ptr.off + meta->ptr.var_off.value;
        new_off = reg->off + reg->var_off.value;

        meta->ptr_not_const |= old_off != new_off;

        if (!meta->ptr_not_const)
                return 0;

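        /* Re-parses may see a different constant offset. Tolerate that as
         * long as the offset within a 4B word is unchanged, which is the
         * part the translation depends on.
         */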
        if (old_off % 4 == new_off % 4)
                return 0;

        pr_vlog(env, "stack access changed location was:%d is:%d\n",
                old_off, new_off);
        return -EINVAL;
}

static const char *nfp_bpf_map_use_name(enum nfp_bpf_map_use use)
{
        static const char * const names[] = {
                [NFP_MAP_UNUSED] = "unused",
                [NFP_MAP_USE_READ] = "read",
                [NFP_MAP_USE_WRITE] = "write",
                [NFP_MAP_USE_ATOMIC_CNT] = "atomic",
        };

        if (use >= ARRAY_SIZE(names) || !names[use])
                return "unknown";
        return names[use];
}

static int
nfp_bpf_map_mark_used_one(struct bpf_verifier_env *env,
                          struct nfp_bpf_map *nfp_map,
                          unsigned int off, enum nfp_bpf_map_use use)
{
        if (nfp_map->use_map[off / 4].type != NFP_MAP_UNUSED &&
            nfp_map->use_map[off / 4].type != use) {
                pr_vlog(env, "map value use type conflict %s vs %s off: %u\n",
                        nfp_bpf_map_use_name(nfp_map->use_map[off / 4].type),
                        nfp_bpf_map_use_name(use), off);
                return -EOPNOTSUPP;
        }

        if (nfp_map->use_map[off / 4].non_zero_update &&
            use == NFP_MAP_USE_ATOMIC_CNT) {
                pr_vlog(env, "atomic counter in map value may already be initialized to non-zero value off: %u\n",
                        off);
                return -EOPNOTSUPP;
        }

        nfp_map->use_map[off / 4].type = use;

        return 0;
}

static int
nfp_bpf_map_mark_used(struct bpf_verifier_env *env, struct nfp_insn_meta *meta,
                      const struct bpf_reg_state *reg,
                      enum nfp_bpf_map_use use)
{
        struct bpf_offloaded_map *offmap;
        struct nfp_bpf_map *nfp_map;
        unsigned int size, off;
        int i, err;

        if (!tnum_is_const(reg->var_off)) {
                pr_vlog(env, "map value offset is variable\n");
                return -EOPNOTSUPP;
        }

        off = reg->var_off.value + meta->insn.off + reg->off;
        size = BPF_LDST_BYTES(&meta->insn);
        offmap = map_to_offmap(reg->map_ptr);
        nfp_map = offmap->dev_priv;

        if (off + size > offmap->map.value_size) {
                pr_vlog(env, "map value access out-of-bounds\n");
                return -EINVAL;
        }

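        /* Mark every 4B word the access touches; the loop step advances to
         * the next word boundary, so an unaligned access marks both words.
         */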
        for (i = 0; i < size; i += 4 - (off + i) % 4) {
                err = nfp_bpf_map_mark_used_one(env, nfp_map, off + i, use);
                if (err)
                        return err;
        }

        return 0;
}

static int
nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  struct bpf_verifier_env *env, u8 reg_no)
{
        const struct bpf_reg_state *reg = cur_regs(env) + reg_no;
        int err;

        if (reg->type != PTR_TO_CTX &&
            reg->type != PTR_TO_STACK &&
            reg->type != PTR_TO_MAP_VALUE &&
            reg->type != PTR_TO_PACKET) {
                pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
                return -EINVAL;
        }

        if (reg->type == PTR_TO_STACK) {
                err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
                if (err)
                        return err;
        }

        if (reg->type == PTR_TO_MAP_VALUE) {
                if (is_mbpf_load(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_READ);
                        if (err)
                                return err;
                }
                if (is_mbpf_store(meta)) {
                        pr_vlog(env, "map writes not supported\n");
                        return -EOPNOTSUPP;
                }
                if (is_mbpf_xadd(meta)) {
                        err = nfp_bpf_map_mark_used(env, meta, reg,
                                                    NFP_MAP_USE_ATOMIC_CNT);
                        if (err)
                                return err;
                }
        }

        if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
                pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
                        meta->ptr.type, reg->type);
                return -EINVAL;
        }

        meta->ptr = *reg;

        return 0;
}

static int
nfp_bpf_check_store(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                    struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *reg = cur_regs(env) + meta->insn.dst_reg;

        if (reg->type == PTR_TO_CTX) {
                if (nfp_prog->type == BPF_PROG_TYPE_XDP) {
                        /* XDP ctx accesses must be 4B in size */
                        switch (meta->insn.off) {
                        case offsetof(struct xdp_md, rx_queue_index):
                                if (nfp_prog->bpf->queue_select)
                                        goto exit_check_ptr;
                                pr_vlog(env, "queue selection not supported by FW\n");
                                return -EOPNOTSUPP;
                        }
                }
                pr_vlog(env, "unsupported store to context field\n");
                return -EOPNOTSUPP;
        }
exit_check_ptr:
        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                   struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;

        if (dreg->type != PTR_TO_MAP_VALUE) {
                pr_vlog(env, "atomic add not to a map value pointer: %d\n",
                        dreg->type);
                return -EOPNOTSUPP;
        }
        if (sreg->type != SCALAR_VALUE) {
                pr_vlog(env, "atomic add not of a scalar: %d\n", sreg->type);
                return -EOPNOTSUPP;
        }

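        /* Use the tnum to work out which add widths the operand may need:
         * any possibly-set bit above bit 15 means the addend can exceed
         * 16 bits, while known bits within 16 bits mean a 16 bit add may
         * still suffice.
         */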
        meta->xadd_over_16bit |=
                sreg->var_off.value > 0xffff || sreg->var_off.mask > 0xffff;
        meta->xadd_maybe_16bit |=
                (sreg->var_off.value & ~sreg->var_off.mask) <= 0xffff;

        return nfp_bpf_check_ptr(nfp_prog, meta, env, meta->insn.dst_reg);
}

static int
nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
                  struct bpf_verifier_env *env)
{
        const struct bpf_reg_state *sreg =
                cur_regs(env) + meta->insn.src_reg;
        const struct bpf_reg_state *dreg =
                cur_regs(env) + meta->insn.dst_reg;

        meta->umin_src = min(meta->umin_src, sreg->umin_value);
        meta->umax_src = max(meta->umax_src, sreg->umax_value);
        meta->umin_dst = min(meta->umin_dst, dreg->umin_value);
        meta->umax_dst = max(meta->umax_dst, dreg->umax_value);

        /* NFP supports u16 and u32 multiplication.
         *
         * For ALU64, if either operand is beyond u32's value range, we
         * reject it. Note that if the source operand is BPF_K we need to
         * check the "imm" field directly, and reject it if it is negative,
         * because for ALU64 "imm" (an s32) is sign extended to s64, which
         * NFP mul doesn't support.
         *
         * For ALU32 it is fine for "imm" to be negative though, because the
         * result is 32-bit and the low half of the result is the same for
         * signed and unsigned mul, so we will get the correct result.
         */
        if (is_mbpf_mul(meta)) {
                if (meta->umax_dst > U32_MAX) {
                        pr_vlog(env, "multiplier is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_src(meta) == BPF_X && meta->umax_src > U32_MAX) {
                        pr_vlog(env, "multiplicand is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_class(meta) == BPF_ALU64 &&
                    mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
                        pr_vlog(env, "sign extended multiplicand won't be within u32 value range\n");
                        return -EINVAL;
                }
        }

        /* NFP doesn't have divide instructions, we support divide by
         * constant through reciprocal multiplication. Since NFP supports
         * multiplication no bigger than u32, we require the divisor and
         * dividend to be no bigger than that as well.
         *
         * Also eBPF doesn't support signed divide and has enforced this at
         * the C language level by failing compilation. However, the LLVM
         * assembler doesn't enforce this, so it is possible for a negative
         * constant to leak in as a BPF_K operand through assembly code; we
         * reject such cases as well.
         */
        if (is_mbpf_div(meta)) {
                if (meta->umax_dst > U32_MAX) {
                        pr_vlog(env, "dividend is not within u32 value range\n");
                        return -EINVAL;
                }
                if (mbpf_src(meta) == BPF_X) {
                        if (meta->umin_src != meta->umax_src) {
                                pr_vlog(env, "divisor is not constant\n");
                                return -EINVAL;
                        }
                        if (meta->umax_src > U32_MAX) {
                                pr_vlog(env, "divisor is not within u32 value range\n");
                                return -EINVAL;
                        }
                }
                if (mbpf_src(meta) == BPF_K && meta->insn.imm < 0) {
                        pr_vlog(env, "divide by negative constant is not supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
                    int prev_insn_idx)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx);
        nfp_prog->verifier_meta = meta;

        if (!nfp_bpf_supported_opcode(meta->insn.code)) {
                pr_vlog(env, "instruction %#02x not supported\n",
                        meta->insn.code);
                return -EINVAL;
        }

        if (meta->insn.src_reg >= MAX_BPF_REG ||
            meta->insn.dst_reg >= MAX_BPF_REG) {
                pr_vlog(env, "program uses extended registers - jit hardening?\n");
                return -EINVAL;
        }

        if (is_mbpf_helper_call(meta))
                return nfp_bpf_check_helper_call(nfp_prog, env, meta);
        if (meta->insn.code == (BPF_JMP | BPF_EXIT))
                return nfp_bpf_check_exit(nfp_prog, env);

        if (is_mbpf_load(meta))
                return nfp_bpf_check_ptr(nfp_prog, meta, env,
                                         meta->insn.src_reg);
        if (is_mbpf_store(meta))
                return nfp_bpf_check_store(nfp_prog, meta, env);

        if (is_mbpf_xadd(meta))
                return nfp_bpf_check_xadd(nfp_prog, meta, env);

        if (is_mbpf_alu(meta))
                return nfp_bpf_check_alu(nfp_prog, meta, env);

        return 0;
}

static int
nfp_assign_subprog_idx_and_regs(struct bpf_verifier_env *env,
                                struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta;
        int index = 0;

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (nfp_is_subprog_start(meta))
                        index++;
                meta->subprog_idx = index;

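                /* A write to a callee-saved register (R6..R9) means the
                 * subprogram has to save and restore it, see the stack
                 * depth accounting in nfp_bpf_finalize().
                 */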
                if (meta->insn.dst_reg >= BPF_REG_6 &&
                    meta->insn.dst_reg <= BPF_REG_9)
                        nfp_prog->subprog[index].needs_reg_push = 1;
        }

        if (index + 1 != nfp_prog->subprog_cnt) {
                pr_vlog(env, "BUG: number of processed BPF functions is not consistent (processed %d, expected %d)\n",
                        index + 1, nfp_prog->subprog_cnt);
                return -EFAULT;
        }

        return 0;
}

static unsigned int nfp_bpf_get_stack_usage(struct nfp_prog *nfp_prog)
{
        struct nfp_insn_meta *meta = nfp_prog_first_meta(nfp_prog);
        unsigned int max_depth = 0, depth = 0, frame = 0;
        struct nfp_insn_meta *ret_insn[MAX_CALL_FRAMES];
        unsigned short frame_depths[MAX_CALL_FRAMES];
        unsigned short ret_prog[MAX_CALL_FRAMES];
        unsigned short idx = meta->subprog_idx;

        /* Inspired by check_max_stack_depth() from the kernel verifier.
         * Starting from the main subprogram, walk all instructions and
         * recursively walk all callees that a given subprogram can call.
         * Since recursion is prevented by the kernel verifier, this
         * algorithm only needs a local stack of MAX_CALL_FRAMES to
         * remember callsites.
         */
process_subprog:
        frame_depths[frame] = nfp_prog->subprog[idx].stack_depth;
        frame_depths[frame] = round_up(frame_depths[frame], STACK_FRAME_ALIGN);
        depth += frame_depths[frame];
        max_depth = max(max_depth, depth);

continue_subprog:
        for (; meta != nfp_prog_last_meta(nfp_prog) && meta->subprog_idx == idx;
             meta = nfp_meta_next(meta)) {
                if (!is_mbpf_pseudo_call(meta))
                        continue;

                /* We found a call to a subprogram. Remember the instruction
                 * to return to and the subprog id.
                 */
                ret_insn[frame] = nfp_meta_next(meta);
                ret_prog[frame] = idx;

                /* Find the callee and start processing it. */
                meta = nfp_bpf_goto_meta(nfp_prog, meta,
                                         meta->n + 1 + meta->insn.imm);
                idx = meta->subprog_idx;
                frame++;
                goto process_subprog;
        }
        /* End of the for() loop means the last instruction of the subprog
         * was reached. If we popped all stack frames, return; otherwise, go
         * on processing remaining instructions from the caller.
         */
        if (frame == 0)
                return max_depth;

        depth -= frame_depths[frame];
        frame--;
        meta = ret_insn[frame];
        idx = ret_prog[frame];
        goto continue_subprog;
}

static void nfp_bpf_insn_flag_zext(struct nfp_prog *nfp_prog,
                                   struct bpf_insn_aux_data *aux)
{
        struct nfp_insn_meta *meta;

        list_for_each_entry(meta, &nfp_prog->insns, l) {
                if (aux[meta->n].zext_dst)
                        meta->flags |= FLAG_INSN_DO_ZEXT;
        }
}

int nfp_bpf_finalize(struct bpf_verifier_env *env)
{
        struct bpf_subprog_info *info;
        struct nfp_prog *nfp_prog;
        unsigned int max_stack;
        struct nfp_net *nn;
        int i;

        nfp_prog = env->prog->aux->offload->dev_priv;
        nfp_prog->subprog_cnt = env->subprog_cnt;
        nfp_prog->subprog = kcalloc(nfp_prog->subprog_cnt,
                                    sizeof(nfp_prog->subprog[0]), GFP_KERNEL);
        if (!nfp_prog->subprog)
                return -ENOMEM;

        nfp_assign_subprog_idx_and_regs(env, nfp_prog);

        info = env->subprog_info;
        for (i = 0; i < nfp_prog->subprog_cnt; i++) {
                nfp_prog->subprog[i].stack_depth = info[i].stack_depth;

                if (i == 0)
                        continue;

                /* Account for the size of the return address. */
                nfp_prog->subprog[i].stack_depth += REG_WIDTH;
                /* Account for the size of saved registers, if necessary. */
                if (nfp_prog->subprog[i].needs_reg_push)
                        nfp_prog->subprog[i].stack_depth += BPF_REG_SIZE * 4;
        }

        nn = netdev_priv(env->prog->aux->offload->netdev);
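        /* The FW advertises its stack size in units of 64 bytes. */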
        max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
        nfp_prog->stack_size = nfp_bpf_get_stack_usage(nfp_prog);
        if (nfp_prog->stack_size > max_stack) {
                pr_vlog(env, "stack too large: program %dB > FW stack %dB\n",
                        nfp_prog->stack_size, max_stack);
                return -EOPNOTSUPP;
        }

        nfp_bpf_insn_flag_zext(nfp_prog, env->insn_aux_data);
        return 0;
}

int nfp_bpf_opt_replace_insn(struct bpf_verifier_env *env, u32 off,
                             struct bpf_insn *insn)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);
        nfp_prog->verifier_meta = meta;

        /* conditional jump to jump conversion */
        if (is_mbpf_cond_jump(meta) &&
            insn->code == (BPF_JMP | BPF_JA | BPF_K)) {
                unsigned int tgt_off;

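                /* BPF jump offsets are relative to the following
                 * instruction, hence the + 1.
                 */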
                tgt_off = off + insn->off + 1;

                if (!insn->off) {
                        meta->jmp_dst = list_next_entry(meta, l);
                        meta->jump_neg_op = false;
                } else if (meta->jmp_dst->n != aux_data[tgt_off].orig_idx) {
                        pr_vlog(env, "branch hard wire at %d changes target %d -> %d\n",
                                off, meta->jmp_dst->n,
                                aux_data[tgt_off].orig_idx);
                        return -EINVAL;
                }
                return 0;
        }

        pr_vlog(env, "unsupported instruction replacement %hhx -> %hhx\n",
                meta->insn.code, insn->code);
        return -EINVAL;
}

int nfp_bpf_opt_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
{
        struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
        struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
        struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
        unsigned int i;

        meta = nfp_bpf_goto_meta(nfp_prog, meta, aux_data[off].orig_idx);

        for (i = 0; i < cnt; i++) {
                if (WARN_ON_ONCE(&meta->l == &nfp_prog->insns))
                        return -EINVAL;

                /* doesn't count if it already has the flag */
                if (meta->flags & FLAG_INSN_SKIP_VERIFIER_OPT)
                        i--;

                meta->flags |= FLAG_INSN_SKIP_VERIFIER_OPT;
                meta = list_next_entry(meta, l);
        }

        return 0;
}