xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/ice/ice_xsk.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /* Copyright (c) 2019, Intel Corporation. */
3*4882a593Smuzhiyun 
4*4882a593Smuzhiyun #ifndef _ICE_XSK_H_
5*4882a593Smuzhiyun #define _ICE_XSK_H_
6*4882a593Smuzhiyun #include "ice_txrx.h"
7*4882a593Smuzhiyun #include "ice.h"
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun struct ice_vsi;
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #ifdef CONFIG_XDP_SOCKETS
12*4882a593Smuzhiyun int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
13*4882a593Smuzhiyun 		       u16 qid);
14*4882a593Smuzhiyun int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
15*4882a593Smuzhiyun bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
16*4882a593Smuzhiyun int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
17*4882a593Smuzhiyun bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
18*4882a593Smuzhiyun bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
19*4882a593Smuzhiyun void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
20*4882a593Smuzhiyun void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
21*4882a593Smuzhiyun #else
22*4882a593Smuzhiyun static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused * vsi,struct xsk_buff_pool __always_unused * pool,u16 __always_unused qid)23*4882a593Smuzhiyun ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
24*4882a593Smuzhiyun 		   struct xsk_buff_pool __always_unused *pool,
25*4882a593Smuzhiyun 		   u16 __always_unused qid)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun 	return -EOPNOTSUPP;
28*4882a593Smuzhiyun }
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun static inline int
ice_clean_rx_irq_zc(struct ice_ring __always_unused * rx_ring,int __always_unused budget)31*4882a593Smuzhiyun ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
32*4882a593Smuzhiyun 		    int __always_unused budget)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	return 0;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun static inline bool
ice_clean_tx_irq_zc(struct ice_ring __always_unused * xdp_ring,int __always_unused budget)38*4882a593Smuzhiyun ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
39*4882a593Smuzhiyun 		    int __always_unused budget)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	return false;
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun static inline bool
ice_alloc_rx_bufs_zc(struct ice_ring __always_unused * rx_ring,u16 __always_unused count)45*4882a593Smuzhiyun ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
46*4882a593Smuzhiyun 		     u16 __always_unused count)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	return false;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused * vsi)51*4882a593Smuzhiyun static inline bool ice_xsk_any_rx_ring_ena(struct ice_vsi __always_unused *vsi)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	return false;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun static inline int
ice_xsk_wakeup(struct net_device __always_unused * netdev,u32 __always_unused queue_id,u32 __always_unused flags)57*4882a593Smuzhiyun ice_xsk_wakeup(struct net_device __always_unused *netdev,
58*4882a593Smuzhiyun 	       u32 __always_unused queue_id, u32 __always_unused flags)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun 	return -EOPNOTSUPP;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun 
/* CONFIG_XDP_SOCKETS disabled: ring-cleanup hooks become no-ops. Kept as
 * do-nothing macros so callers need no #ifdef guards.
 */
#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
65*4882a593Smuzhiyun #endif /* CONFIG_XDP_SOCKETS */
66*4882a593Smuzhiyun #endif /* !_ICE_XSK_H_ */
67