/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct skb_array' data structure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#ifdef __KERNEL__
#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#endif

struct skb_array {
	struct ptr_ring ring;
};
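
/*
 * Usage sketch (illustrative, not part of the original header). The queue
 * size and GFP flags are arbitrary example values, and netif_receive_skb()
 * merely stands in for whatever the consumer does with a packet.
 * skb_array_produce() returns non-zero when the ring is full,
 * skb_array_consume() returns NULL when it is empty, and
 * skb_array_cleanup() frees any skbs still queued:
 *
 *	struct skb_array q;
 *	struct sk_buff *skb;
 *
 *	if (skb_array_init(&q, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (skb_array_produce(&q, skb))
 *		kfree_skb(skb);
 *
 *	skb = skb_array_consume(&q);
 *	if (skb)
 *		netif_receive_skb(skb);
 *
 *	skb_array_cleanup(&q);
 */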

/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}
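
/*
 * Illustrative pattern (not from the original header): a producer that
 * spins for space keeps cpu_relax() in the loop to provide the compiler
 * barrier required above, and retries the produce itself because another
 * producer may have taken the free slot in the meantime:
 *
 *	do {
 *		while (__skb_array_full(a))
 *			cpu_relax();
 *	} while (skb_array_produce(a, skb));
 */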

static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}
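
/*
 * Illustrative pattern (not from the original header): a consumer polling
 * for work, assuming the ring is never resized while the loop runs, as the
 * comment above requires. cpu_relax() supplies the compiler barrier, and
 * the NULL check covers the case where another consumer drained the entry
 * first. process_skb() is a placeholder for the caller's own handling:
 *
 *	struct sk_buff *skb;
 *
 *	while (__skb_array_empty(a))
 *		cpu_relax();
 *	skb = skb_array_consume(a);
 *	if (skb)
 *		process_skb(skb);
 */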

static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

/* skb->len, plus VLAN_HLEN when the VLAN tag is stored out of band in the
 * skb metadata rather than in the packet data; 0 for a NULL skb.
 */
static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

/* Destroy callback passed to the ptr_ring helpers below: frees skbs that are
 * dropped when the ring is resized or cleaned up, or when unconsumed entries
 * cannot be pushed back.
 */
static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

/* The cast below relies on struct skb_array wrapping nothing but the
 * ptr_ring; the BUILD_BUG_ON enforces that 'ring' stays at offset 0.
 */
static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */