// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */
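
/* Illustrative userspace flow (a sketch for orientation only, not part of
 * the kernel API defined here; see Documentation/networking/af_xdp.rst):
 *
 *	fd = socket(AF_XDP, SOCK_RAW, 0);
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	mmap(..., fd, XDP_PGOFF_RX_RING);
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */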

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;
	u32 hr, tr;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
	tr = xs->dev->needed_tailroom;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_reserve(skb, hr);
		skb_put(skb, len);

		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
						   qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

/* Legacy layout of struct xdp_umem_reg from before the flags field was
 * added; shorter optlens from older applications are still accepted by the
 * XDP_UMEM_REG handler below.
 */
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

/* Legacy layout of struct xdp_statistics, without the per-ring counters
 * returned to newer applications by the XDP_STATISTICS handler below.
 */
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family = PF_XDP,
	.owner = THIS_MODULE,
	.release = xsk_release,
	.bind = xsk_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = xsk_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = xsk_setsockopt,
	.getsockopt = xsk_getsockopt,
	.sendmsg = xsk_sendmsg,
	.recvmsg = sock_no_recvmsg,
	.mmap = xsk_mmap,
	.sendpage = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);