/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2020 Intel Corporation. */

#ifndef XSK_BUFF_POOL_H_
#define XSK_BUFF_POOL_H_

#include <linux/if_xdp.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
struct xdp_umem;
struct xdp_sock;
struct device;
struct page;

struct xdp_buff_xsk {
	struct xdp_buff xdp;
	dma_addr_t dma;
	dma_addr_t frame_dma;
	struct xsk_buff_pool *pool;
	bool unaligned;
	u64 orig_addr;
	struct list_head free_list_node;
};
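
/* Illustrative usage sketch (hedged; not part of this header): xp_alloc()
 * hands drivers a struct xdp_buff *, and since @xdp is the first member, the
 * enclosing xdp_buff_xsk can be recovered with container_of() to reach the
 * cached DMA addresses:
 *
 *	struct xdp_buff *xdp = xp_alloc(pool);
 *	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 *	dma_addr_t dma = xp_get_dma(xskb);
 */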
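/* One DMA mapping of a umem's pages for a given device. A hedged reading of
 * the fields below: the mapping is reference counted (@users) so pools that
 * share the same umem and netdev can reuse it instead of remapping every
 * page.
 */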
struct xsk_dma_map {
	dma_addr_t *dma_pages;
	struct device *dev;
	struct net_device *netdev;
	refcount_t users;
	struct list_head list; /* Protected by the RTNL_LOCK */
	u32 dma_pages_cnt;
	bool dma_need_sync;
};

struct xsk_buff_pool {
	/* Members used only in the control path come first. */
	struct device *dev;
	struct net_device *netdev;
	struct list_head xsk_tx_list;
	/* Protects modifications to the xsk_tx_list */
	spinlock_t xsk_tx_list_lock;
	refcount_t users;
	struct xdp_umem *umem;
	struct work_struct work;
	struct list_head free_list;
	u32 heads_cnt;
	u16 queue_id;

	/* Data path members follow, laid out as close to the free_heads
	 * array at the end as possible.
	 */
	struct xsk_queue *fq ____cacheline_aligned_in_smp;
	struct xsk_queue *cq;
	/* For performance reasons, each buff pool has its own array of dma_pages
	 * even when they are identical.
	 */
	dma_addr_t *dma_pages;
	struct xdp_buff_xsk *heads;
	u64 chunk_mask;
	u64 addrs_cnt;
	u32 free_list_cnt;
	u32 dma_pages_cnt;
	u32 free_heads_cnt;
	u32 headroom;
	u32 chunk_size;
	u32 frame_len;
	u8 cached_need_wakeup;
	bool uses_need_wakeup;
	bool dma_need_sync;
	bool unaligned;
	void *addrs;
	/* Mutual exclusion of the completion ring in SKB mode. Two cases need
	 * protecting: the NAPI TX thread racing with the sendmsg error path in
	 * the SKB destructor callback, and sockets sharing a single cq because
	 * they share the same netdev and queue id.
	 */
	spinlock_t cq_lock;
	struct xdp_buff_xsk *free_heads[];
};

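/* Note (hedged, inferred from the flexible free_heads[] array above): the
 * pool is expected to be allocated in one block sized for heads_cnt entries,
 * e.g. with kvzalloc(struct_size(pool, free_heads, chunks), GFP_KERNEL).
 */
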
/* AF_XDP core. */
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem);
int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
		  u16 queue_id, u16 flags);
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
void xp_get_pool(struct xsk_buff_pool *pool);
bool xp_put_pool(struct xsk_buff_pool *pool);
void xp_clear_dev(struct xsk_buff_pool *pool);
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);

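/* Hedged sketch of the control-path lifetime implied by the declarations
 * above (error handling omitted, caller context assumed from the AF_XDP
 * core):
 *
 *	pool = xp_create_and_assign_umem(xs, umem);
 *	err = xp_assign_dev(pool, dev, queue_id, flags);
 *	...
 *	xp_put_pool(pool);	// last reference tears the pool down
 */
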
/* AF_XDP and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);

/* AF_XDP ZC drivers, via xdp_sock_drv.h */
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq);
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages);
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs);
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool);
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count);
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr);
dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr);
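
/* Hedged note on the two getters below: @frame_dma is understood to point at
 * the start of the chunk, while @dma points at the packet data itself (past
 * any configured headroom), so Rx completion and Tx submission paths can pick
 * the address they need without recomputing offsets.
 */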
static inline dma_addr_t xp_get_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->dma;
}

static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
{
	return xskb->frame_dma;
}

void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
	xp_dma_sync_for_cpu_slow(xskb);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size);
static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
					  dma_addr_t dma, size_t size)
{
	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_device_slow(pool, dma, size);
}
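
/* Hedged example of a zero-copy Tx submission path using the helpers above
 * (the descriptor layout and ring-posting step are assumptions, not part of
 * this header):
 *
 *	dma_addr_t dma = xp_raw_get_dma(pool, desc->addr);
 *
 *	xp_dma_sync_for_device(pool, dma, desc->len);
 *	// hand dma/len to the hardware Tx ring here
 */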

/* Masks for xdp_umem_page flags.
 * The low 12 bits of the addr will be 0 since this is the page address, so we
 * can use them for flags.
 */
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)

static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
						 u64 addr, u32 len)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;

	if (likely(!cross_pg))
		return false;

	if (pool->dma_pages_cnt) {
		return !(pool->dma_pages[addr >> PAGE_SHIFT] &
			 XSK_NEXT_PG_CONTIG_MASK);
	}

	/* skb path */
	return addr + len > pool->addrs_cnt;
}
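
/* Worked example (hedged, assuming 4 KiB pages): addr = 0x0ff0, len = 0x20
 * spills 0x10 bytes into the next page, so the descriptor is only usable in
 * the DMA path if dma_pages[0] carries XSK_NEXT_PG_CONTIG_MASK, i.e. pages 0
 * and 1 are physically contiguous in the mapping.
 */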

static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
{
	return addr & pool->chunk_mask;
}
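
/* Example (hedged, assuming chunk_mask = ~((u64)chunk_size - 1)): with
 * chunk_size = 2048, addr = 0x1234 is rounded down to its chunk start,
 * 0x1000.
 */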

static inline u64 xp_unaligned_extract_addr(u64 addr)
{
	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
}

static inline u64 xp_unaligned_extract_offset(u64 addr)
{
	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
}

static inline u64 xp_unaligned_add_offset_to_addr(u64 addr)
{
	return xp_unaligned_extract_addr(addr) +
		xp_unaligned_extract_offset(addr);
}
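
/* Example (hedged, using the if_xdp.h encoding where the upper 16 bits carry
 * the offset): for addr = (16ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | 0x1000,
 * xp_unaligned_extract_addr() returns 0x1000, xp_unaligned_extract_offset()
 * returns 16, and xp_unaligned_add_offset_to_addr() returns 0x1010.
 */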

#endif /* XSK_BUFF_POOL_H_ */