/* /OK3568_Linux_fs/kernel/net/xdp/xsk_buff_pool.c */
// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

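/* Track a TX-bound socket on the pool's TX list. Sockets without a TX
 * ring are ignored. The list is modified with the _rcu list helpers
 * under xsk_tx_list_lock so that readers can walk it under RCU.
 */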
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->heads);
	kvfree(pool);
}

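/* Allocate a buffer pool for the given UMEM and seed it with one
 * xdp_buff_xsk head per UMEM chunk. Layout parameters (chunk size,
 * headroom, aligned/unaligned mode) are copied from the UMEM, and the
 * fill/completion rings staged in the socket (fq_tmp/cq_tmp) are taken
 * over by the pool. Returns NULL on allocation failure.
 */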
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i;

	pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
			GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	pool->frame_len = umem->chunk_size - umem->headroom -
		XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		pool->free_heads[i] = xskb;
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}

void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

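/* Tell the driver to tear down its zero-copy state for this queue by
 * issuing XDP_SETUP_XSK_POOL with a NULL pool. A no-op when the UMEM
 * never ran in zero-copy mode. Must be called with the rtnl lock held.
 */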
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}

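/* Bind the pool to a netdev queue. XDP_COPY and XDP_ZEROCOPY are
 * mutually exclusive flags. The pool is registered at the queue id
 * first; zero-copy is then attempted via ndo_bpf unless XDP_COPY was
 * requested. If the driver lacks the required ndo ops or rejects the
 * pool, the code falls back to copy mode unless XDP_ZEROCOPY was
 * explicitly forced, in which case the error is propagated.
 */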
static int __xp_assign_dev(struct xsk_buff_pool *pool,
			   struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP) {
		pool->uses_need_wakeup = true;
		/* Tx needs to be explicitly woken up the first time.
		 * Also for supporting drivers that do not implement this
		 * feature. They will always have to call sendto().
		 */
		pool->cached_need_wakeup = XDP_WAKEUP_TX;
	}

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}

int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags)
{
	return __xp_assign_dev(pool, dev, queue_id, flags);
}

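/* Bind a new pool to a device/queue while sharing an existing UMEM.
 * The new pool must bring its own fill and completion rings, and the
 * binding flags are derived from how the original socket was bound
 * (zero-copy vs. copy, need_wakeup).
 */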
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return __xp_assign_dev(pool, dev, queue_id, flags);
}

void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}

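/* Final teardown of a pool, deferred to a workqueue (scheduled from
 * xp_put_pool) so that the rtnl lock can be taken to detach from the
 * device. The fill and completion rings, the UMEM reference and the
 * pool itself are released here.
 */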
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

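/* Drop a reference to the pool. When the last user is gone, the actual
 * release is deferred to a workqueue. Returns true if this call
 * scheduled the teardown.
 */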
bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}

static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

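/* DMA mappings are kept per (UMEM, netdev) pair in struct xsk_dma_map
 * and refcounted, so pools sharing a UMEM on the same device reuse one
 * mapping instead of mapping the pages again.
 */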
static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}

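/* Unmap every page of a DMA map and free it. The contiguity marker bit
 * stored in the low bits of each entry is cleared before the address is
 * handed back to dma_unmap_page_attrs().
 */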
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

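/* Driver-facing unmap. Drops the pool's reference on the shared DMA
 * map; the pages are only unmapped once the last pool using them on
 * this device has gone away.
 */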
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

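/* Mark, in a low-order bit of each DMA address, whether the next page
 * is contiguous in DMA space. The unaligned-chunk path uses this to
 * decide whether a buffer may straddle that page boundary.
 */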
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}

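/* Map the UMEM pages for DMA on behalf of a zero-copy driver. If the
 * UMEM is already mapped for this device, the existing mapping is
 * reused and its refcount bumped; otherwise every page is mapped
 * bidirectionally and the per-pool copy of the address table is set
 * up. dma_need_sync is recorded so the fast paths can skip syncs on
 * coherent devices.
 */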
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(xp_dma_map);

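/* Helpers that validate fill-ring addresses before a buffer is handed
 * out: unaligned-mode addresses carry an offset in their upper bits and
 * must not run past the end of the UMEM or cross a non-contiguous page
 * pair, while aligned-mode addresses are simply masked down to their
 * chunk start.
 */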
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}

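/* Slow path of buffer allocation: take a free head and pair it with
 * the next valid address from the fill ring, skipping (and counting)
 * invalid descriptors. The virtual address and, when the pool is DMA
 * mapped, the frame's DMA addresses are precomputed here.
 */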
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	xskb = pool->free_heads[--pool->free_heads_cnt];

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			xp_release(xskb);
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}
	xskq_cons_release(pool->fq);

	xskb->orig_addr = addr;
	xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
	if (pool->dma_pages_cnt) {
		xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
				   ~XSK_NEXT_PG_CONTIG_MASK) +
				  (addr & ~PAGE_MASK);
		xskb->dma = xskb->frame_dma + pool->headroom +
			    XDP_PACKET_HEADROOM;
	}
	return xskb;
}

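/* Driver-facing allocation. Buffers are recycled from the pool's free
 * list when possible and only pulled from the fill ring via __xp_alloc
 * when the free list is empty. For non-coherent devices the frame is
 * synced for device before it is returned.
 */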
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

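/* Translate a raw UMEM address (as found in a ring descriptor) into its
 * DMA address: pick the mapped page, strip the contiguity marker bit
 * and add the offset within the page.
 */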
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);