xref: /OK3568_Linux_fs/kernel/include/uapi/linux/if_xdp.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * if_xdp: XDP socket user-space interface
4*4882a593Smuzhiyun  * Copyright(c) 2018 Intel Corporation.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Author(s): Björn Töpel <bjorn.topel@intel.com>
7*4882a593Smuzhiyun  *	      Magnus Karlsson <magnus.karlsson@intel.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifndef _LINUX_IF_XDP_H
11*4882a593Smuzhiyun #define _LINUX_IF_XDP_H
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/types.h>
14*4882a593Smuzhiyun 
/* Options for the sxdp_flags field */
#define XDP_SHARED_UMEM	(1 << 0) /* Share the UMEM of the socket named by sxdp_shared_umem_fd */
#define XDP_COPY	(1 << 1) /* Force copy-mode */
#define XDP_ZEROCOPY	(1 << 2) /* Force zero-copy mode */
/* If this option is set, the driver might go sleep and in that case
 * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be
 * set. If it is set, the application needs to explicitly wake up the
 * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are
 * running the driver and the application on the same core, you should
 * use this option so that the kernel will yield to the user space
 * application.
 */
#define XDP_USE_NEED_WAKEUP (1 << 3)

/* Flags for the flags member of struct xdp_umem_reg (xsk_umem_config) */
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0)
31*4882a593Smuzhiyun 
/* Address passed to bind(2) for an XDP socket: ties the socket to one
 * queue of one network interface. All fields are fixed-width for a
 * stable user/kernel ABI.
 */
struct sockaddr_xdp {
	__u16 sxdp_family;         /* Address family (presumably AF_XDP — see socket(2) caller) */
	__u16 sxdp_flags;          /* XDP_SHARED_UMEM / XDP_COPY / XDP_ZEROCOPY / XDP_USE_NEED_WAKEUP */
	__u32 sxdp_ifindex;        /* Interface index to bind to */
	__u32 sxdp_queue_id;       /* Queue id on that interface */
	__u32 sxdp_shared_umem_fd; /* fd of socket owning the UMEM when XDP_SHARED_UMEM is set */
};
39*4882a593Smuzhiyun 
/* XDP_RING flags: reported via the flags offset of each ring.
 * Set by a sleeping driver when XDP_USE_NEED_WAKEUP is in use; tells the
 * application it must wake the kernel with poll()/sendto().
 */
#define XDP_RING_NEED_WAKEUP (1 << 0)
42*4882a593Smuzhiyun 
/* Byte offsets, within one ring's mmap()ed region, of the ring's
 * control fields and descriptor array.
 */
struct xdp_ring_offset {
	__u64 producer; /* Offset of the producer index */
	__u64 consumer; /* Offset of the consumer index */
	__u64 desc;     /* Offset of the descriptor array */
	__u64 flags;    /* Offset of the ring flags word (XDP_RING_NEED_WAKEUP) */
};
49*4882a593Smuzhiyun 
/* Returned by the XDP_MMAP_OFFSETS socket option: layout information
 * for all four rings of an XDP socket.
 */
struct xdp_mmap_offsets {
	struct xdp_ring_offset rx;
	struct xdp_ring_offset tx;
	struct xdp_ring_offset fr; /* Fill */
	struct xdp_ring_offset cr; /* Completion */
};
56*4882a593Smuzhiyun 
/* XDP socket options (optname values for [gs]etsockopt at SOL_XDP level) */
#define XDP_MMAP_OFFSETS		1 /* get: struct xdp_mmap_offsets */
#define XDP_RX_RING			2 /* set: size of the Rx ring */
#define XDP_TX_RING			3 /* set: size of the Tx ring */
#define XDP_UMEM_REG			4 /* set: struct xdp_umem_reg */
#define XDP_UMEM_FILL_RING		5 /* set: size of the fill ring */
#define XDP_UMEM_COMPLETION_RING	6 /* set: size of the completion ring */
#define XDP_STATISTICS			7 /* get: struct xdp_statistics */
#define XDP_OPTIONS			8 /* get: struct xdp_options */
66*4882a593Smuzhiyun 
/* Argument to the XDP_UMEM_REG socket option: registers a user memory
 * area that packet buffers (chunks) are carved out of.
 */
struct xdp_umem_reg {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size; /* Size of each buffer chunk within the area */
	__u32 headroom; /* Reserved bytes at the start of each chunk */
	__u32 flags; /* XDP_UMEM_UNALIGNED_CHUNK_FLAG or 0 */
};
74*4882a593Smuzhiyun 
/* Returned by the XDP_STATISTICS socket option: per-socket drop and
 * ring-stall counters.
 */
struct xdp_statistics {
	__u64 rx_dropped; /* Dropped for other reasons */
	__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
	__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
	__u64 rx_ring_full; /* Dropped due to rx ring being full */
	__u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
	__u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
};
83*4882a593Smuzhiyun 
/* Returned by the XDP_OPTIONS socket option. */
struct xdp_options {
	__u32 flags; /* XDP_OPTIONS_* bits, e.g. XDP_OPTIONS_ZEROCOPY */
};
87*4882a593Smuzhiyun 
/* Flags for the flags field of struct xdp_options */
#define XDP_OPTIONS_ZEROCOPY (1 << 0) /* Socket is running in zero-copy mode */

/* Pgoff values for mmap()ing the rings; each selects one ring region.
 * Values beyond 32 bits are ULL so they survive on 32-bit userspace.
 */
#define XDP_PGOFF_RX_RING			  0
#define XDP_PGOFF_TX_RING		 0x80000000
#define XDP_UMEM_PGOFF_FILL_RING	0x100000000ULL
#define XDP_UMEM_PGOFF_COMPLETION_RING	0x180000000ULL

/* Masks for unaligned chunks mode: a descriptor address packs the
 * offset-into-chunk in the top 16 bits and the base address in the
 * low 48 bits.
 */
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)
101*4882a593Smuzhiyun 
/* Rx/Tx descriptor: one entry in the Rx or Tx ring. */
struct xdp_desc {
	__u64 addr; /* Buffer address within the UMEM (packed form in unaligned mode) */
	__u32 len; /* Length of the packet data */
	__u32 options; /* Currently unused by this header's visible flags — reserved */
};
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun /* UMEM descriptor is __u64 */
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun #endif /* _LINUX_IF_XDP_H */
112