// SPDX-License-Identifier: GPL-2.0
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/overflow.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
xskq_get_ring_size(struct xsk_queue * q,bool umem_queue)13*4882a593Smuzhiyun static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun 	struct xdp_umem_ring *umem_ring;
16*4882a593Smuzhiyun 	struct xdp_rxtx_ring *rxtx_ring;
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun 	if (umem_queue)
19*4882a593Smuzhiyun 		return struct_size(umem_ring, desc, q->nentries);
20*4882a593Smuzhiyun 	return struct_size(rxtx_ring, desc, q->nentries);
21*4882a593Smuzhiyun }
22*4882a593Smuzhiyun 
/* Allocate a queue descriptor and its page-backed ring.
 *
 * nentries: number of ring slots; ring_mask assumes this is a power of
 * two — presumably enforced by callers before we get here (not visible
 * in this file).
 * umem_queue: selects the umem vs rx/tx descriptor layout for sizing.
 *
 * Returns the new queue, or NULL on allocation failure. The ring pages
 * are zeroed (__GFP_ZERO) and allocated as a compound page (__GFP_COMP);
 * __GFP_NOWARN | __GFP_NORETRY keeps large-order failures quiet and
 * cheap. Ownership of both allocations passes to the caller, who frees
 * them via xskq_destroy().
 */
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
			  __GFP_COMP | __GFP_NORETRY;
	struct xsk_queue *q;
	size_t size;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;

	q->nentries = nentries;
	q->ring_mask = nentries - 1;

	size = xskq_get_ring_size(q, umem_queue);

	q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
						      get_order(size));
	if (!q->ring) {
		kfree(q);
		return NULL;
	}

	return q;
}
49*4882a593Smuzhiyun 
xskq_destroy(struct xsk_queue * q)50*4882a593Smuzhiyun void xskq_destroy(struct xsk_queue *q)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	if (!q)
53*4882a593Smuzhiyun 		return;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	page_frag_free(q->ring);
56*4882a593Smuzhiyun 	kfree(q);
57*4882a593Smuzhiyun }
58