Lines matching refs:xs (identifier cross-reference for the AF_XDP socket code in net/xdp/xsk.c)
48 struct xdp_sock *xs; in xsk_set_tx_need_wakeup() local
54 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { in xsk_set_tx_need_wakeup()
55 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_set_tx_need_wakeup()
75 struct xdp_sock *xs; in xsk_clear_tx_need_wakeup() local
81 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { in xsk_clear_tx_need_wakeup()
82 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; in xsk_clear_tx_need_wakeup()
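The two functions above set and clear XDP_RING_NEED_WAKEUP in the TX ring's flags word, which is visible to userspace. As a point of reference, a minimal userspace-side sketch of the matching check, assuming an already bound AF_XDP socket and a pointer to the mmapped TX ring's flags field (off.tx.flags from the XDP_MMAP_OFFSETS getsockopt); xsk_fd and tx_flags are placeholder names:

#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Kick the kernel TX path only when xsk_set_tx_need_wakeup() asked for it. */
static void kick_tx_if_needed(int xsk_fd, const volatile __u32 *tx_flags)
{
	if (*tx_flags & XDP_RING_NEED_WAKEUP)
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
}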
151 static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) in __xsk_rcv_zc() argument
158 err = xskq_prod_reserve_desc(xs->rx, addr, len); in __xsk_rcv_zc()
160 xs->rx_queue_full++; in __xsk_rcv_zc()
186 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len, in __xsk_rcv() argument
192 if (len > xsk_pool_get_rx_frame_size(xs->pool)) { in __xsk_rcv()
193 xs->rx_dropped++; in __xsk_rcv()
197 xsk_xdp = xsk_buff_alloc(xs->pool); in __xsk_rcv()
199 xs->rx_dropped++; in __xsk_rcv()
204 err = __xsk_rcv_zc(xs, xsk_xdp, len); in __xsk_rcv()
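__xsk_rcv() is the copy-mode receive path: frames longer than the pool's RX frame size are dropped (rx_dropped), and a full RX ring bumps rx_queue_full in __xsk_rcv_zc(). The RX frame size is bounded by the chunk size userspace registered for the UMEM; a rough sketch of that registration, with FRAME_SIZE, NUM_FRAMES and umem_area as placeholder names:

#include <sys/socket.h>
#include <linux/if_xdp.h>

#define FRAME_SIZE	2048
#define NUM_FRAMES	4096

/* Register the UMEM; chunk_size bounds the frame size accepted by the
 * copy path checked in __xsk_rcv(). */
static int reg_umem(int xsk_fd, void *umem_area)
{
	struct xdp_umem_reg mr = {
		.addr = (__u64)(unsigned long)umem_area,
		.len = (__u64)NUM_FRAMES * FRAME_SIZE,
		.chunk_size = FRAME_SIZE,
		.headroom = 0,
	};

	return setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
}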
214 static bool xsk_tx_writeable(struct xdp_sock *xs) in xsk_tx_writeable() argument
216 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2) in xsk_tx_writeable()
222 static bool xsk_is_bound(struct xdp_sock *xs) in xsk_is_bound() argument
224 if (READ_ONCE(xs->state) == XSK_BOUND) { in xsk_is_bound()
232 static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, in xsk_rcv() argument
237 if (!xsk_is_bound(xs)) in xsk_rcv()
240 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) in xsk_rcv()
246 __xsk_rcv_zc(xs, xdp, len) : in xsk_rcv()
247 __xsk_rcv(xs, xdp, len, explicit_free); in xsk_rcv()
250 static void xsk_flush(struct xdp_sock *xs) in xsk_flush() argument
252 xskq_prod_submit(xs->rx); in xsk_flush()
253 __xskq_cons_release(xs->pool->fq); in xsk_flush()
254 sock_def_readable(&xs->sk); in xsk_flush()
257 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) in xsk_generic_rcv() argument
261 spin_lock_bh(&xs->rx_lock); in xsk_generic_rcv()
262 err = xsk_rcv(xs, xdp, false); in xsk_generic_rcv()
263 xsk_flush(xs); in xsk_generic_rcv()
264 spin_unlock_bh(&xs->rx_lock); in xsk_generic_rcv()
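xsk_flush() publishes the reserved RX descriptors with xskq_prod_submit(), releases the consumed fill-ring entries and wakes the socket; xsk_generic_rcv() does this inline for copy mode driven from generic XDP. The userspace counterpart consumes RX descriptors and recycles their buffers through the fill ring. A sketch assuming the xsk.h helper library from libbpf/libxdp, aligned (default) UMEM mode, and placeholder names rx, fill and umem_area:

#include <bpf/xsk.h>	/* <xdp/xsk.h> when using libxdp */

/* Drain descriptors made visible by xsk_flush() and refill the fill ring. */
static void rx_drain(struct xsk_ring_cons *rx, struct xsk_ring_prod *fill,
		     void *umem_area)
{
	__u32 idx_rx = 0, idx_fill = 0;
	__u32 rcvd = xsk_ring_cons__peek(rx, 64, &idx_rx);

	if (!rcvd)
		return;
	while (xsk_ring_prod__reserve(fill, rcvd, &idx_fill) != rcvd)
		;	/* fill ring full; real code would poll/kick here */

	for (__u32 i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
		void *pkt = xsk_umem__get_data(umem_area, desc->addr);

		(void)pkt;	/* parse desc->len bytes here */
		*xsk_ring_prod__fill_addr(fill, idx_fill++) = desc->addr;
	}
	xsk_ring_cons__release(rx, rcvd);
	xsk_ring_prod__submit(fill, rcvd);
}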
268 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) in __xsk_map_redirect() argument
273 err = xsk_rcv(xs, xdp, true); in __xsk_map_redirect()
277 if (!xs->flush_node.prev) in __xsk_map_redirect()
278 list_add(&xs->flush_node, flush_list); in __xsk_map_redirect()
286 struct xdp_sock *xs, *tmp; in __xsk_map_flush() local
288 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) { in __xsk_map_flush()
289 xsk_flush(xs); in __xsk_map_flush()
290 __list_del_clearprev(&xs->flush_node); in __xsk_map_flush()
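__xsk_map_redirect() and __xsk_map_flush() are reached when an XDP program redirects a frame into a BPF_MAP_TYPE_XSKMAP; the flush runs once per NAPI poll via the per-CPU flush list. A minimal BPF-side sketch of such a program (map name and size are arbitrary):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} xsks_map SEC(".maps");

SEC("xdp")
int redirect_to_xsk(struct xdp_md *ctx)
{
	__u32 q = ctx->rx_queue_index;

	/* Redirected frames land in xsk_rcv() via __xsk_map_redirect() and
	 * are published by __xsk_map_flush() at the end of the NAPI poll. */
	if (bpf_map_lookup_elem(&xsks_map, &q))
		return bpf_redirect_map(&xsks_map, q, 0);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";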
302 struct xdp_sock *xs; in xsk_tx_release() local
305 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { in xsk_tx_release()
306 __xskq_cons_release(xs->tx); in xsk_tx_release()
307 if (xsk_tx_writeable(xs)) in xsk_tx_release()
308 xs->sk.sk_write_space(&xs->sk); in xsk_tx_release()
316 struct xdp_sock *xs; in xsk_tx_peek_desc() local
319 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { in xsk_tx_peek_desc()
320 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) { in xsk_tx_peek_desc()
321 xs->tx->queue_empty_descs++; in xsk_tx_peek_desc()
333 xskq_cons_release(xs->tx); in xsk_tx_peek_desc()
344 static int xsk_wakeup(struct xdp_sock *xs, u8 flags) in xsk_wakeup() argument
346 struct net_device *dev = xs->dev; in xsk_wakeup()
350 err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags); in xsk_wakeup()
356 static int xsk_zc_xmit(struct xdp_sock *xs) in xsk_zc_xmit() argument
358 return xsk_wakeup(xs, XDP_WAKEUP_TX); in xsk_zc_xmit()
364 struct xdp_sock *xs = xdp_sk(skb->sk); in xsk_destruct_skb() local
367 spin_lock_irqsave(&xs->pool->cq_lock, flags); in xsk_destruct_skb()
368 xskq_prod_submit_addr(xs->pool->cq, addr); in xsk_destruct_skb()
369 spin_unlock_irqrestore(&xs->pool->cq_lock, flags); in xsk_destruct_skb()
376 struct xdp_sock *xs = xdp_sk(sk); in xsk_generic_xmit() local
385 mutex_lock(&xs->mutex); in xsk_generic_xmit()
387 if (xs->queue_id >= xs->dev->real_num_tx_queues) in xsk_generic_xmit()
390 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom)); in xsk_generic_xmit()
391 tr = xs->dev->needed_tailroom; in xsk_generic_xmit()
393 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) { in xsk_generic_xmit()
412 buffer = xsk_buff_raw_get_data(xs->pool, addr); in xsk_generic_xmit()
419 spin_lock_irqsave(&xs->pool->cq_lock, flags); in xsk_generic_xmit()
420 if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) { in xsk_generic_xmit()
421 spin_unlock_irqrestore(&xs->pool->cq_lock, flags); in xsk_generic_xmit()
425 spin_unlock_irqrestore(&xs->pool->cq_lock, flags); in xsk_generic_xmit()
427 skb->dev = xs->dev; in xsk_generic_xmit()
433 err = __dev_direct_xmit(skb, xs->queue_id); in xsk_generic_xmit()
437 spin_lock_irqsave(&xs->pool->cq_lock, flags); in xsk_generic_xmit()
438 xskq_prod_cancel(xs->pool->cq); in xsk_generic_xmit()
439 spin_unlock_irqrestore(&xs->pool->cq_lock, flags); in xsk_generic_xmit()
446 xskq_cons_release(xs->tx); in xsk_generic_xmit()
457 xs->tx->queue_empty_descs++; in xsk_generic_xmit()
461 if (xsk_tx_writeable(xs)) in xsk_generic_xmit()
464 mutex_unlock(&xs->mutex); in xsk_generic_xmit()
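xsk_generic_xmit() is the copy-mode send path behind sendto(): it peeks TX descriptors, reserves a completion-queue slot per frame and transmits with __dev_direct_xmit(); xsk_destruct_skb() later submits the buffer address to the completion queue when the skb is freed. A userspace-side sketch of producing one TX descriptor and reaping completions, assuming the libbpf/libxdp xsk.h helpers and placeholder names tx, cq, addr and len:

#include <sys/socket.h>
#include <bpf/xsk.h>

/* Queue one frame, kick the kernel, and reclaim completed buffers. */
static void tx_one(int xsk_fd, struct xsk_ring_prod *tx,
		   struct xsk_ring_cons *cq, __u64 addr, __u32 len)
{
	__u32 idx;

	if (xsk_ring_prod__reserve(tx, 1, &idx) == 1) {
		struct xdp_desc *desc = xsk_ring_prod__tx_desc(tx, idx);

		desc->addr = addr;
		desc->len = len;
		xsk_ring_prod__submit(tx, 1);
		/* Copy mode runs xsk_generic_xmit(); zero copy goes through
		 * xsk_zc_xmit()/ndo_xsk_wakeup(). */
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	}

	/* Addresses appear here once xsk_destruct_skb() (copy mode) or the
	 * driver (zero copy) completes them. */
	__u32 done = xsk_ring_cons__peek(cq, 64, &idx);
	if (done) {
		/* *xsk_ring_cons__comp_addr(cq, idx) ... mark buffers free */
		xsk_ring_cons__release(cq, done);
	}
}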
470 struct xdp_sock *xs = xdp_sk(sk); in __xsk_sendmsg() local
472 if (unlikely(!(xs->dev->flags & IFF_UP))) in __xsk_sendmsg()
474 if (unlikely(!xs->tx)) in __xsk_sendmsg()
477 return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk); in __xsk_sendmsg()
484 struct xdp_sock *xs = xdp_sk(sk); in xsk_sendmsg() local
486 if (unlikely(!xsk_is_bound(xs))) in xsk_sendmsg()
499 struct xdp_sock *xs = xdp_sk(sk); in xsk_poll() local
504 if (unlikely(!xsk_is_bound(xs))) in xsk_poll()
507 pool = xs->pool; in xsk_poll()
510 if (xs->zc) in xsk_poll()
511 xsk_wakeup(xs, pool->cached_need_wakeup); in xsk_poll()
517 if (xs->rx && !xskq_prod_is_empty(xs->rx)) in xsk_poll()
519 if (xs->tx && xsk_tx_writeable(xs)) in xsk_poll()
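xsk_poll() reports EPOLLIN when the RX ring has entries and EPOLLOUT while the TX ring is below the writeable threshold in xsk_tx_writeable(); with need_wakeup enabled it also kicks the driver. A minimal userspace sketch of waiting on the socket:

#include <poll.h>

/* Returns >0 when the RX ring is readable or the TX ring is writeable. */
static int wait_on_xsk(int xsk_fd)
{
	struct pollfd pfd = {
		.fd = xsk_fd,
		.events = POLLIN | POLLOUT,
	};

	return poll(&pfd, 1, 1000 /* ms */);
}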
543 static void xsk_unbind_dev(struct xdp_sock *xs) in xsk_unbind_dev() argument
545 struct net_device *dev = xs->dev; in xsk_unbind_dev()
547 if (xs->state != XSK_BOUND) in xsk_unbind_dev()
549 WRITE_ONCE(xs->state, XSK_UNBOUND); in xsk_unbind_dev()
552 xp_del_xsk(xs->pool, xs); in xsk_unbind_dev()
553 xs->dev = NULL; in xsk_unbind_dev()
558 static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs, in xsk_get_map_list_entry() argument
566 spin_lock_bh(&xs->map_list_lock); in xsk_get_map_list_entry()
567 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node, in xsk_get_map_list_entry()
574 spin_unlock_bh(&xs->map_list_lock); in xsk_get_map_list_entry()
578 static void xsk_delete_from_maps(struct xdp_sock *xs) in xsk_delete_from_maps() argument
598 while ((map = xsk_get_map_list_entry(xs, &map_entry))) { in xsk_delete_from_maps()
599 xsk_map_try_sock_delete(map, xs, map_entry); in xsk_delete_from_maps()
607 struct xdp_sock *xs = xdp_sk(sk); in xsk_release() local
623 xsk_delete_from_maps(xs); in xsk_release()
624 mutex_lock(&xs->mutex); in xsk_release()
625 xsk_unbind_dev(xs); in xsk_release()
626 mutex_unlock(&xs->mutex); in xsk_release()
628 xskq_destroy(xs->rx); in xsk_release()
629 xskq_destroy(xs->tx); in xsk_release()
630 xskq_destroy(xs->fq_tmp); in xsk_release()
631 xskq_destroy(xs->cq_tmp); in xsk_release()
659 static bool xsk_validate_queues(struct xdp_sock *xs) in xsk_validate_queues() argument
661 return xs->fq_tmp && xs->cq_tmp; in xsk_validate_queues()
668 struct xdp_sock *xs = xdp_sk(sk); in xsk_bind() local
684 mutex_lock(&xs->mutex); in xsk_bind()
685 if (xs->state != XSK_READY) { in xsk_bind()
696 if (!xs->rx && !xs->tx) { in xsk_bind()
714 if (xs->umem) { in xsk_bind()
737 xs->pool = xp_create_and_assign_umem(xs, in xsk_bind()
739 if (!xs->pool) { in xsk_bind()
745 err = xp_assign_dev_shared(xs->pool, umem_xs, dev, in xsk_bind()
748 xp_destroy(xs->pool); in xsk_bind()
749 xs->pool = NULL; in xsk_bind()
755 if (xs->fq_tmp || xs->cq_tmp) { in xsk_bind()
763 xs->pool = umem_xs->pool; in xsk_bind()
767 WRITE_ONCE(xs->umem, umem_xs->umem); in xsk_bind()
769 } else if (!xs->umem || !xsk_validate_queues(xs)) { in xsk_bind()
774 xs->pool = xp_create_and_assign_umem(xs, xs->umem); in xsk_bind()
775 if (!xs->pool) { in xsk_bind()
780 err = xp_assign_dev(xs->pool, dev, qid, flags); in xsk_bind()
782 xp_destroy(xs->pool); in xsk_bind()
783 xs->pool = NULL; in xsk_bind()
789 xs->fq_tmp = NULL; in xsk_bind()
790 xs->cq_tmp = NULL; in xsk_bind()
792 xs->dev = dev; in xsk_bind()
793 xs->zc = xs->umem->zc; in xsk_bind()
794 xs->queue_id = qid; in xsk_bind()
795 xp_add_xsk(xs->pool, xs); in xsk_bind()
805 WRITE_ONCE(xs->state, XSK_BOUND); in xsk_bind()
808 mutex_unlock(&xs->mutex); in xsk_bind()
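xsk_bind() requires fill and completion rings (xsk_validate_queues()), optionally adopts another socket's buffer pool when XDP_SHARED_UMEM is set, and finally records dev, queue_id and the zero-copy flag before moving the socket to XSK_BOUND. A sketch of the plain (non-shared) bind from userspace; ifindex and queue_id are placeholders:

#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Bind the socket to one queue of a netdev. Other sxdp_flags of interest:
 * XDP_COPY / XDP_ZEROCOPY to force a mode, XDP_SHARED_UMEM together with
 * sxdp_shared_umem_fd to reuse another socket's UMEM. */
static int bind_xsk(int xsk_fd, unsigned int ifindex, unsigned int queue_id)
{
	struct sockaddr_xdp sxdp = {
		.sxdp_family = AF_XDP,
		.sxdp_ifindex = ifindex,
		.sxdp_queue_id = queue_id,
		.sxdp_flags = XDP_USE_NEED_WAKEUP,
	};

	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}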
824 struct xdp_sock *xs = xdp_sk(sk); in xsk_setsockopt() local
842 mutex_lock(&xs->mutex); in xsk_setsockopt()
843 if (xs->state != XSK_READY) { in xsk_setsockopt()
844 mutex_unlock(&xs->mutex); in xsk_setsockopt()
847 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx; in xsk_setsockopt()
851 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; in xsk_setsockopt()
852 mutex_unlock(&xs->mutex); in xsk_setsockopt()
869 mutex_lock(&xs->mutex); in xsk_setsockopt()
870 if (xs->state != XSK_READY || xs->umem) { in xsk_setsockopt()
871 mutex_unlock(&xs->mutex); in xsk_setsockopt()
877 mutex_unlock(&xs->mutex); in xsk_setsockopt()
883 WRITE_ONCE(xs->umem, umem); in xsk_setsockopt()
884 mutex_unlock(&xs->mutex); in xsk_setsockopt()
896 mutex_lock(&xs->mutex); in xsk_setsockopt()
897 if (xs->state != XSK_READY) { in xsk_setsockopt()
898 mutex_unlock(&xs->mutex); in xsk_setsockopt()
902 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp : in xsk_setsockopt()
903 &xs->cq_tmp; in xsk_setsockopt()
905 mutex_unlock(&xs->mutex); in xsk_setsockopt()
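The setsockopt() handler above sizes the four rings: XDP_RX_RING/XDP_TX_RING attach to xs->rx/xs->tx directly, while XDP_UMEM_FILL_RING/XDP_UMEM_COMPLETION_RING are parked in xs->fq_tmp/xs->cq_tmp until bind time. A sketch of the userspace side; ndescs is a placeholder and must be a power of two:

#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Create all four rings before bind(). */
static int setup_rings(int xsk_fd, int ndescs)
{
	if (setsockopt(xsk_fd, SOL_XDP, XDP_RX_RING, &ndescs, sizeof(ndescs)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_TX_RING, &ndescs, sizeof(ndescs)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING, &ndescs, sizeof(ndescs)) ||
	    setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ndescs, sizeof(ndescs)))
		return -1;
	return 0;
}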
939 struct xdp_sock *xs = xdp_sk(sk); in xsk_getsockopt() local
966 mutex_lock(&xs->mutex); in xsk_getsockopt()
967 stats.rx_dropped = xs->rx_dropped; in xsk_getsockopt()
969 stats.rx_ring_full = xs->rx_queue_full; in xsk_getsockopt()
971 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0; in xsk_getsockopt()
972 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx); in xsk_getsockopt()
974 stats.rx_dropped += xs->rx_queue_full; in xsk_getsockopt()
976 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx); in xsk_getsockopt()
977 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx); in xsk_getsockopt()
978 mutex_unlock(&xs->mutex); in xsk_getsockopt()
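The per-socket counters maintained above (rx_dropped, rx_queue_full, invalid and empty descriptors) are exported through the XDP_STATISTICS getsockopt. A sketch of reading them, assuming a uapi header recent enough to carry the ring-full/empty-descs fields:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Dump the counters filled in by xsk_getsockopt(XDP_STATISTICS). */
static void dump_xsk_stats(int xsk_fd)
{
	struct xdp_statistics stats = {};
	socklen_t len = sizeof(stats);

	if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, &stats, &len))
		return;

	printf("rx_dropped=%llu rx_ring_full=%llu rx_invalid=%llu tx_invalid=%llu\n",
	       (unsigned long long)stats.rx_dropped,
	       (unsigned long long)stats.rx_ring_full,
	       (unsigned long long)stats.rx_invalid_descs,
	       (unsigned long long)stats.tx_invalid_descs);
}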
1046 mutex_lock(&xs->mutex); in xsk_getsockopt()
1047 if (xs->zc) in xsk_getsockopt()
1049 mutex_unlock(&xs->mutex); in xsk_getsockopt()
1071 struct xdp_sock *xs = xdp_sk(sock->sk); in xsk_mmap() local
1076 if (READ_ONCE(xs->state) != XSK_READY) in xsk_mmap()
1080 q = READ_ONCE(xs->rx); in xsk_mmap()
1082 q = READ_ONCE(xs->tx); in xsk_mmap()
1087 q = READ_ONCE(xs->fq_tmp); in xsk_mmap()
1089 q = READ_ONCE(xs->cq_tmp); in xsk_mmap()
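xsk_mmap() selects which ring to map from the page offset passed to mmap(); the ring layout comes from the XDP_MMAP_OFFSETS getsockopt. A sketch for the RX ring; the other rings use XDP_PGOFF_TX_RING, XDP_UMEM_PGOFF_FILL_RING and XDP_UMEM_PGOFF_COMPLETION_RING:

#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Map the RX ring after it has been sized with XDP_RX_RING. */
static void *map_rx_ring(int xsk_fd, int ndescs)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen = sizeof(off);

	if (getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
		return MAP_FAILED;

	return mmap(NULL, off.rx.desc + ndescs * sizeof(struct xdp_desc),
		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		    xsk_fd, XDP_PGOFF_RX_RING);
}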
1117 struct xdp_sock *xs = xdp_sk(sk); in xsk_notifier() local
1119 mutex_lock(&xs->mutex); in xsk_notifier()
1120 if (xs->dev == dev) { in xsk_notifier()
1125 xsk_unbind_dev(xs); in xsk_notifier()
1128 xp_clear_dev(xs->pool); in xsk_notifier()
1130 mutex_unlock(&xs->mutex); in xsk_notifier()
1167 struct xdp_sock *xs = xdp_sk(sk); in xsk_destruct() local
1172 if (!xp_put_pool(xs->pool)) in xsk_destruct()
1173 xdp_put_umem(xs->umem, !xs->pool); in xsk_destruct()
1181 struct xdp_sock *xs; in xsk_create() local
1209 xs = xdp_sk(sk); in xsk_create()
1210 xs->state = XSK_READY; in xsk_create()
1211 mutex_init(&xs->mutex); in xsk_create()
1212 spin_lock_init(&xs->rx_lock); in xsk_create()
1214 INIT_LIST_HEAD(&xs->map_list); in xsk_create()
1215 spin_lock_init(&xs->map_list_lock); in xsk_create()
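xsk_create() runs when userspace opens an AF_XDP socket; the socket starts in XSK_READY and everything else (UMEM, rings, bind) is configured on the returned fd. A one-line sketch:

#include <sys/socket.h>

int open_xsk(void)
{
	/* Lands in xsk_create(); configure with setsockopt()/bind() next. */
	return socket(AF_XDP, SOCK_RAW, 0);
}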