// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <linux/bug.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#include <net/xdp_sock_drv.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

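/* The mem.id registry: each registered memory model gets a cyclic ID from
 * mem_id_pool, and that ID is mapped to its struct xdp_mem_allocator through
 * the lazily allocated mem_id_ht rhashtable. ID 0 is reserved to mean "no
 * allocator registered", which is why MEM_ID_MIN starts at 1.
 */
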
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);
}

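/* Disconnect callback handed to the page_pool via page_pool_use_xdp_mem().
 * It removes every hash table entry that points at @allocator. The
 * rhashtable walk can return -EAGAIN when the table is resized underneath
 * the walker, in which case the walk is simply restarted; mem_id_lock
 * serialises this against new registrations.
 */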
static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	mutex_lock(&mem_id_lock);

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

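/* Lazily allocate the mem.id -> allocator rhashtable on first use.
 * Callers are serialised by mem_id_lock.
 */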
static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem  = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);

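/* Typical driver usage (illustrative sketch only, not part of this file;
 * "rxq" below is a hypothetical per-queue driver structure embedding a
 * struct xdp_rxq_info and a previously created page_pool):
 *
 *	err = xdp_rxq_info_reg(&rxq->xdp_rxq, netdev, rxq->queue_index);
 *	if (err)
 *		return err;
 *
 *	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
 *					 rxq->page_pool);
 *	if (err) {
 *		xdp_rxq_info_unreg(&rxq->xdp_rxq);
 *		return err;
 *	}
 *
 * On teardown the driver calls xdp_rxq_info_unreg(), which also
 * unregisters the memory model.
 */
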
/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites, allowing faster recycling of
 * xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 struct xdp_buff *xdp)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		if (napi_direct && xdp_return_frame_no_direct())
			napi_direct = false;
		page_pool_put_full_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_XSK_BUFF_POOL:
		/* NB! Only valid from an xdp_buff! */
		xsk_buff_free(xdp);
		break;
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
		break;
	}
}

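/* xdp_return_frame() is the generic return path. The _rx_napi variant sets
 * @napi_direct and should only be used from the driver's own NAPI/softirq
 * context, where direct page_pool recycling is safe.
 */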
void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
}

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

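/* Record the newly attached XDP program and flags, dropping the reference
 * held on any previously installed program.
 */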
void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);

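/* Copy a zero-copy (XSK buffer pool) xdp_buff into a freshly allocated
 * order-0 page and build an xdp_frame around the copy, so the frame can
 * outlive the XSK buffer, which is returned to its pool here. Returns NULL
 * if the packet plus metadata does not fit in one page or the page
 * allocation fails.
 */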
struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->frame_sz = PAGE_SIZE;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xsk_buff_free(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);

/* Used by XDP_WARN macro, to avoid inlining WARN() in fast-path */
void xdp_warn(const char *msg, const char *func, const int line)
{
	WARN(1, "XDP_WARN: %s(line:%d): %s\n", func, line, msg);
}
EXPORT_SYMBOL_GPL(xdp_warn);
462