/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	/* Hinder the adjacent cache prefetcher from prefetching the consumer
	 * pointer when the producer pointer is touched and vice versa.
	 */
	u32 pad ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
	u64 queue_empty_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 *   producer                         consumer
 *
 *   if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)             smp_rmb()       (C)
 *      STORE $data                   LOAD $data
 *      smp_wmb()       (B)           smp_mb()        (D)
 *      STORE ->producer              STORE ->consumer
 *   }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it ensures that the data is written before the
 * producer pointer is updated. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. If we did not have this
 * barrier, some architectures could load old data, as speculative loads
 * are not discarded when the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. In case ->consumer indicates there is no
 * room in the buffer to store $data, we do not store it. So no explicit
 * barrier is needed.
 *
 * (D) prevents the load of the data from being observed after the
 * store of the consumer pointer. If we did not have this memory
 * barrier, the producer could observe the consumer pointer being set
 * and overwrite the data with a new value before the consumer got the
 * chance to read the old value. The consumer would thus miss reading
 * the old entry and very likely read the new entry twice, once right
 * now and again after circling through the ring.
 */
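
/* The two helpers below are a minimal, illustrative sketch of the scheme
 * above and are not used by the kernel code in this file: a hypothetical
 * single-producer store and single-consumer load over a bare struct
 * xdp_ring, annotated with the (A)-(D) points. The caller-supplied @data
 * array and @mask stand in for the descriptor arrays and ring mask of the
 * real queues.
 */
static inline bool xskq_example_prod_store(struct xdp_ring *r, u64 *data,
					   u32 mask, u64 val)
{
	u32 prod = r->producer;

	/* (A) control dependency: only store if there is room. */
	if (prod - READ_ONCE(r->consumer) > mask)
		return false;

	data[prod & mask] = val;		/* STORE $data */
	smp_wmb();				/* (B), matches (C) */
	WRITE_ONCE(r->producer, prod + 1);	/* STORE ->producer */
	return true;
}

static inline bool xskq_example_cons_load(struct xdp_ring *r, u64 *data,
					  u32 mask, u64 *val)
{
	u32 cons = r->consumer;

	if (cons == READ_ONCE(r->producer))	/* LOAD ->producer */
		return false;

	smp_rmb();				/* (C), matches (B) */
	*val = data[cons & mask];		/* LOAD $data */
	smp_mb();				/* (D), matches (A) */
	WRITE_ONCE(r->consumer, cons + 1);	/* STORE ->consumer */
	return true;
}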

/* The operations on the rings are the following:
 *
 *   producer                        consumer
 *
 *   RESERVE entries                 PEEK in the ring for entries
 *   WRITE data into the ring        READ data from the ring
 *   SUBMIT entries                  RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations; an illustrative
 * usage sketch follows the producer functions further down.
 */

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx];
		return true;
	}

	return false;
}

static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
					    struct xdp_desc *desc)
{
	u64 chunk, chunk_end;

	chunk = xp_aligned_extract_addr(pool, desc->addr);
	if (likely(desc->len)) {
		chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
		if (chunk != chunk_end)
			return false;
	}

	if (chunk >= pool->addrs_cnt)
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
					      struct xdp_desc *desc)
{
	u64 addr, base_addr;

	base_addr = xp_unaligned_extract_addr(desc->addr);
	addr = xp_unaligned_add_offset_to_addr(desc->addr);

	if (desc->len > pool->chunk_size)
		return false;

	if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
		return false;

	if (desc->options)
		return false;
	return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
		xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xsk_buff_pool *pool)
{
	if (!xp_validate_desc(pool, d)) {
		q->invalid_descs++;
		return false;
	}
	return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, pool))
			return true;

		q->cached_cons++;
	}

	return false;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xsk_buff_pool *pool)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, pool);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing is completed in the NAPI loop.
	 */
	q->cached_cons++;
}

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
	q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}
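
/* Another illustrative sketch, not used by the kernel proper: a hypothetical
 * driver-style caller exercising the RESERVE/WRITE/SUBMIT and
 * PEEK/READ/RELEASE operations described at the top of this file. The @rx,
 * @tx, @pool, @addr and @len arguments are stand-ins; the real callers live
 * in xsk.c and the zero-copy drivers.
 */
static inline void xskq_example_produce_one(struct xsk_queue *rx, u64 addr,
					    u32 len)
{
	/* RESERVE + WRITE: place one descriptor in the Rx ring. */
	if (xskq_prod_reserve_desc(rx, addr, len))
		return;	/* ring full (-ENOSPC) */

	/* SUBMIT: publish it to the consumer (barrier B). */
	xskq_prod_submit(rx);
}

static inline void xskq_example_consume_all(struct xsk_queue *tx,
					    struct xsk_buff_pool *pool)
{
	struct xdp_desc desc;

	/* PEEK + READ: fetch validated descriptors from the Tx ring. */
	while (xskq_cons_peek_desc(tx, &desc, pool)) {
		/* ... use desc.addr and desc.len ... */

		/* RELEASE: give the slot back, local state only. */
		xskq_cons_release(tx);
	}

	/* Make the released slots visible to the producer (barrier D). */
	__xskq_cons_release(tx);
}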

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
	return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */