// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

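/* Record a neutral (host-resident) map used by an offloaded program.
 * Records are shared between all offloaded programs using the same map;
 * if another program already tracks this map we only bump the record's
 * refcount, otherwise we take a reference on the map and insert a fresh
 * record into the maps_neutral rhashtable, keyed by map ID.
 */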
static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	bpf_map_inc(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

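/* Drop this program's references to its map records.  Records whose
 * refcount hits zero are removed from the rhashtable; a single RCU grace
 * period is then waited out before the map references are released and
 * the records freed, so lockless readers cannot see freed memory.
 */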
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

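/* Walk the program's used_maps array under used_maps_mutex and record
 * every offload-neutral map.  A first pass only counts the maps so the
 * record pointer array can be sized exactly; on any failure all records
 * taken so far are rolled back via nfp_map_ptrs_forget().
 */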
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err = 0;

	mutex_lock(&prog->aux->used_maps_mutex);

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		goto out;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				goto out;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

out:
	mutex_unlock(&prog->aux->used_maps_mutex);
	return err;
}

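/* Build the driver's per-instruction metadata list from the raw BPF
 * instruction array.  For ALU instructions the unsigned operand minimums
 * start at U64_MAX, which appears to act as an "unknown" sentinel until
 * range information is filled in later (presumably from verifier state).
 */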
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}
	nfp_prog->n_insns = cnt;

	nfp_bpf_jit_prepare(nfp_prog);

	return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	kfree(nfp_prog->subprog);

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

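/* .prepare callback: allocate the driver-private program state and hang
 * it off the program's offload context before host verification starts.
 */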
static int nfp_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = bpf_offload_dev_priv(prog->aux->offload->offdev);

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

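/* .translate callback: JIT the instruction list into NFP machine code.
 * The image buffer is sized from the firmware-advertised maximum program
 * length, and the neutral maps used by the program are recorded last so
 * a JIT failure needs no map cleanup.
 */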
static int nfp_bpf_translate(struct bpf_prog *prog)
{
	struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev);
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_instr;
	int err;

	/* We depend on dead code elimination succeeding */
	if (prog->aux->offload->opt_failed)
		return -EINVAL;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static void nfp_bpf_destroy(struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

/* Mark value as unsafely initialized in case it becomes atomic later
 * and we didn't byte swap something non-byte swap neutral.
 */
static void
nfp_map_bpf_byte_swap_record(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i].type == NFP_MAP_UNUSED &&
		    word[i] != (__force u32)cpu_to_be32(word[i]))
			nfp_map->use_map[i].non_zero_update = 1;
}

static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	nfp_map_bpf_byte_swap_record(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

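/* Validate an offloaded map request against the firmware-advertised
 * capabilities (supported map types, map and element counts, key/value
 * sizes) before asking the firmware to allocate a hardware table.
 */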
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}

	if (round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
			round_up(offmap->map.key_size, 8) +
			round_up(offmap->map.value_size, 8),
			bpf->maps.max_elem_sz);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz) {
		pr_info("map key size %u, FW max is %u\n",
			offmap->map.key_size, bpf->maps.max_key_sz);
		return -ENOMEM;
	}
	if (offmap->map.value_size > bpf->maps.max_val_sz) {
		pr_info("map value size %u, FW max is %u\n",
			offmap->map.value_size, bpf->maps.max_val_sz);
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       sizeof_field(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;
	spin_lock_init(&nfp_map->cache_lock);

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	dev_consume_skb_any(nfp_map->cache);
	WARN_ON_ONCE(nfp_map->cache_blockers);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

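/* Trivial bpf_ctx_copy_t callback for bpf_event_output(); the event
 * payload already sits in the control message, so a plain copy suffices.
 */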
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

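/* Handle a perf event control message from firmware: validate the
 * message length and ABI version, look up the host map matching the
 * reported map ID under RCU, and forward the packet and metadata to the
 * perf ring via bpf_event_output().
 */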
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
	if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
		return -EINVAL;

	rcu_read_lock();
	record = rhashtable_lookup(&bpf->maps_neutral, &map_id,
				   nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	return 0;
}

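/* Check whether the program may access packet data beyond what firmware
 * keeps directly accessible.  Firmware advertises this boundary in units
 * of 64 bytes, with a fixed 32 bytes subtracted (seemingly reserved
 * headroom); offload is refused if the program's maximum packet offset,
 * capped at the MTU, can exceed it.
 */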
bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
			       unsigned int mtu)
{
	unsigned int fw_mtu, pkt_off;

	fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	pkt_off = min(prog->aux->max_pkt_offset, mtu);

	return fw_mtu < pkt_off;
}

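/* Push the JITed image to the device: validate stack and program length
 * against firmware limits, relocate the image for this vNIC, DMA-map it,
 * point firmware at it through the config BAR, and kick a reconfig.  The
 * DMA mapping and relocated image are only needed for the duration of
 * the firmware command.
 */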
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_stack, max_prog_len;
	dma_addr_t dma_addr;
	void *img;
	int err;

	if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
		return -EOPNOTSUPP;
	}

	max_stack = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
	if (nfp_prog->stack_size > max_stack) {
		NL_SET_ERR_MSG_MOD(extack, "stack too large");
		return -EOPNOTSUPP;
	}

	max_prog_len = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	if (nfp_prog->prog_len > max_prog_len) {
		NL_SET_ERR_MSG_MOD(extack, "program too long");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

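/* Attach, replace or detach an offloaded program on a vNIC.  Live
 * replacement of a running program requires the firmware relocation
 * capability; detaching simply clears the BPF control bit.
 */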
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
		return -EINVAL;

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}

const struct bpf_prog_offload_ops nfp_bpf_dev_ops = {
	.insn_hook	= nfp_verify_insn,
	.finalize	= nfp_bpf_finalize,
	.replace_insn	= nfp_bpf_opt_replace_insn,
	.remove_insns	= nfp_bpf_opt_remove_insns,
	.prepare	= nfp_bpf_verifier_prep,
	.translate	= nfp_bpf_translate,
	.destroy	= nfp_bpf_destroy,
};