/*
 * bcm_ring.h : Ring context abstraction
 * The ring context tracks the WRITE and READ indices where elements may be
 * produced and consumed respectively. All elements in the ring need to be
 * fixed size.
 *
 * NOTE: A ring of size N, may only hold N-1 elements.
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */
#ifndef __bcm_ring_included__
#define __bcm_ring_included__
/*
 * API Notes:
 *
 * Ring manipulation API allows for:
 *  Pending operations: Often, before some work can be completed, several
 *  resources must be available, e.g. space for production in a ring.
 *  Approaches such as #1) reserving resources one by one and returning them
 *  if another required resource is not available, or #2) employing a two-pass
 *  algorithm that first tests whether all resources are available, have an
 *  impact on performance critical code. The approach taken here is more akin
 *  to approach #2, where a test for resource availability essentially also
 *  provides the index for production in an un-committed state.
 *  The same approach is taken for the consumer side.
 *
 * - Pending production: Fetch the next index where a ring element may be
 *   produced. The WRITE index is not yet committed; the caller commits it
 *   explicitly once the element has been produced.
 * - Pending consumption: Fetch the next index where a ring element may be
 *   consumed. The READ index is not yet committed; the caller commits it
 *   explicitly once the element has been consumed.
 *
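 * A minimal sketch of this pending/commit pattern (illustrative only; the
 * element type "item_t", buffer "items" and depth "DEPTH" are hypothetical):
 *
 *   int w, next_w, r, next_r;
 *   w = bcm_ring_prod_pend(&ring, &next_w, DEPTH);
 *   if (w != BCM_RING_FULL) {
 *       items[w] = new_item;                  // produce element at index w
 *       bcm_ring_prod_done(&ring, next_w);    // commit the WRITE index
 *   }
 *   r = bcm_ring_cons_pend(&ring, &next_r, DEPTH);
 *   if (r != BCM_RING_EMPTY) {
 *       use_item(&items[r]);                  // consume element at index r
 *       bcm_ring_cons_done(&ring, next_r);    // commit the READ index
 *   }
 *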
 * Producer side API:
 * - bcm_ring_is_full   : Test whether ring is full
 * - bcm_ring_prod      : Fetch index where an element may be produced (commit)
 * - bcm_ring_prod_pend : Fetch index where an element may be produced (pending)
 * - bcm_ring_prod_done : Commit a previous pending produce fetch
 * - bcm_ring_prod_avail: Fetch total number of free slots eligible for production
 *
 * Consumer side API:
 * - bcm_ring_is_empty  : Test whether ring is empty
 * - bcm_ring_cons      : Fetch index where an element may be consumed (commit)
 * - bcm_ring_cons_pend : Fetch index where an element may be consumed (pending)
 * - bcm_ring_cons_done : Commit a previous pending consume fetch
 * - bcm_ring_cons_avail: Fetch total number of elements eligible for consumption
 *
 * - bcm_ring_sync_read : Sync read offset in peer ring, from local ring
 * - bcm_ring_sync_write: Sync write offset in peer ring, from local ring
 *
 * +----------------------------------------------------------------------------
 *
 * Design Notes:
 * The following items are not tracked in a ring context (design decision):
 * - width of a ring element.
 * - depth of the ring.
 * - base of the buffer, where the elements are stored.
 * - count of number of free slots in the ring
 *
 * Implementation Notes:
 * - When BCM_RING_DEBUG is enabled, an explicit bcm_ring_init() is required.
 * - BCM_RING_EMPTY and BCM_RING_FULL are (-1)
 *
 * +----------------------------------------------------------------------------
 *
 * Usage Notes:
 * An application may instantiate a ring of fixed-size elements by defining
 * - a ring data buffer to store the ring elements.
 * - depth of the ring (max number of elements managed by the ring context).
 *   Preferably, the depth is represented as a constant.
 * - width of a ring element: to be used in pointer arithmetic with the ring's
 *   data buffer base and an index to fetch the ring element.
 *
 * Use bcm_workq_t to instantiate a pair of workq constructs, one for the
 * producer and the other for the consumer, both pointing to the same circular
 * buffer. The producer may operate on its own local workq and flush the write
 * index to the consumer. Likewise the consumer may use its local workq and
 * flush the read index to the producer. This way we do not repeatedly access
 * the peer's context. The two peers may reside on different CPU cores with a
 * private L1 data cache.
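 *
 * A minimal setup sketch (illustrative only; the element type "work_item_t",
 * buffer "wi_buf" and depth "WI_DEPTH" are hypothetical):
 *
 *   #define WI_DEPTH 64                  // ring depth: holds up to WI_DEPTH-1 items
 *   work_item_t wi_buf[WI_DEPTH];        // shared circular buffer of fixed-size elements
 *   bcm_workq_t wi_prod, wi_cons;        // producer and consumer workq contexts
 *
 *   bcm_workq_init(&wi_prod, &wi_cons, wi_buf, WI_DEPTH);
 *   // the producer flushes its WRITE index with bcm_workq_prod_sync(&wi_prod);
 *   // the consumer flushes its READ index with bcm_workq_cons_sync(&wi_cons).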
 * +----------------------------------------------------------------------------
 *
 * -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*-
 * vim: set ts=4 noet sw=4 tw=80:
 *
 * +----------------------------------------------------------------------------
 */

#ifdef ____cacheline_aligned
#define __ring_aligned ____cacheline_aligned
#else
#define __ring_aligned
#endif

/* Conditional compile for debug */
/* #define BCM_RING_DEBUG */

#define BCM_RING_EMPTY (-1)
#define BCM_RING_FULL  (-1)
#define BCM_RING_NULL  ((bcm_ring_t *)NULL)

#if defined(BCM_RING_DEBUG)
#define RING_ASSERT(exp)        ASSERT(exp)
#define BCM_RING_IS_VALID(ring) (((ring) != BCM_RING_NULL) && \
                                 ((ring)->self == (ring)))
#else  /* ! BCM_RING_DEBUG */
#define RING_ASSERT(exp)        do {} while (0)
#define BCM_RING_IS_VALID(ring) ((ring) != BCM_RING_NULL)
#endif /* ! BCM_RING_DEBUG */

#define BCM_RING_SIZE_IS_VALID(ring_size) ((ring_size) > 0)

/*
 * +----------------------------------------------------------------------------
 * Ring Context
 * +----------------------------------------------------------------------------
 */
typedef struct bcm_ring {     /* Ring context */
#if defined(BCM_RING_DEBUG)
    struct bcm_ring *self;    /* ptr to self for IS VALID test */
#endif /* BCM_RING_DEBUG */
    int write __ring_aligned; /* WRITE index in a circular ring */
    int read  __ring_aligned; /* READ index in a circular ring */
} bcm_ring_t;

static INLINE void bcm_ring_init(bcm_ring_t *ring);
static INLINE void bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from);
static INLINE bool bcm_ring_is_empty(const bcm_ring_t *ring);

static INLINE int __bcm_ring_next_write(const bcm_ring_t *ring, const int ring_size);

static INLINE bool __bcm_ring_full(const bcm_ring_t *ring, int next_write);
static INLINE bool bcm_ring_is_full(bcm_ring_t *ring, const int ring_size);

static INLINE void bcm_ring_prod_done(bcm_ring_t *ring, int write);
static INLINE int bcm_ring_prod_pend(const bcm_ring_t *ring, int *pend_write,
                                     const int ring_size);
static INLINE int bcm_ring_prod(bcm_ring_t *ring, const int ring_size);

static INLINE void bcm_ring_cons_done(bcm_ring_t *ring, int read);
static INLINE int bcm_ring_cons_pend(const bcm_ring_t *ring, int *pend_read,
                                     const int ring_size);
static INLINE int bcm_ring_cons(bcm_ring_t *ring, const int ring_size);

static INLINE void bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self);
static INLINE void bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self);

static INLINE int bcm_ring_prod_avail(const bcm_ring_t *ring,
                                      const int ring_size);
static INLINE int bcm_ring_cons_avail(const bcm_ring_t *ring,
                                      const int ring_size);
static INLINE void bcm_ring_cons_all(bcm_ring_t *ring);

/**
 * bcm_ring_init - initialize a ring context.
 * @ring: pointer to a ring context
 */
static INLINE void
bcm_ring_init(bcm_ring_t *ring)
{
    ASSERT(ring != (bcm_ring_t *)NULL);
#if defined(BCM_RING_DEBUG)
    ring->self = ring;
#endif /* BCM_RING_DEBUG */
    ring->write = 0;
    ring->read = 0;
}

/**
 * bcm_ring_copy - copy construct a ring
 * @to: pointer to the new ring context
 * @from: pointer to orig ring context
 */
static INLINE void
bcm_ring_copy(bcm_ring_t *to, bcm_ring_t *from)
{
    bcm_ring_init(to);

    to->write = from->write;
    to->read = from->read;
}

/**
 * bcm_ring_is_empty - "Boolean" test whether ring is empty.
 * @ring: pointer to a ring context
 *
 * PS. does not return BCM_RING_EMPTY value.
 */
static INLINE bool
bcm_ring_is_empty(const bcm_ring_t *ring)
{
    RING_ASSERT(BCM_RING_IS_VALID(ring));
    return (ring->read == ring->write);
}

/**
 * __bcm_ring_next_write - determine the index where the next write may occur
 *                         (with wrap-around).
 * @ring: pointer to a ring context
 * @ring_size: size of the ring
 *
 * PRIVATE INTERNAL USE ONLY.
 */
static INLINE int
__bcm_ring_next_write(const bcm_ring_t *ring, const int ring_size)
{
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    return ((ring->write + 1) % ring_size);
}

/**
 * __bcm_ring_full - support function for ring full test.
 * @ring: pointer to a ring context
 * @next_write: next location in ring where an element is to be produced
 *
 * PRIVATE INTERNAL USE ONLY.
 */
static INLINE bool
__bcm_ring_full(const bcm_ring_t *ring, int next_write)
{
    return (next_write == ring->read);
}

/**
 * bcm_ring_is_full - "Boolean" test whether a ring is full.
 * @ring: pointer to a ring context
 * @ring_size: size of the ring
 *
 * PS. does not return BCM_RING_FULL value.
 */
static INLINE bool
bcm_ring_is_full(bcm_ring_t *ring, const int ring_size)
{
    int next_write;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    next_write = __bcm_ring_next_write(ring, ring_size);
    return __bcm_ring_full(ring, next_write);
}

/**
 * bcm_ring_prod_done - commit a previously pending index where production
 *                      was requested.
 * @ring: pointer to a ring context
 * @write: index into the ring up to which production was done.
 * +----------------------------------------------------------------------------
 */
static INLINE void
bcm_ring_prod_done(bcm_ring_t *ring, int write)
{
    RING_ASSERT(BCM_RING_IS_VALID(ring));
    ring->write = write;
}

/**
 * bcm_ring_prod_pend - Fetch in "pend" mode, the index where an element may be
 *                      produced.
 * @ring: pointer to a ring context
 * @pend_write: next index, after the returned index
 * @ring_size: size of the ring
 */
static INLINE int
bcm_ring_prod_pend(const bcm_ring_t *ring, int *pend_write, const int ring_size)
{
    int rtn;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    *pend_write = __bcm_ring_next_write(ring, ring_size);
    if (__bcm_ring_full(ring, *pend_write)) {
        *pend_write = BCM_RING_FULL;
        rtn = BCM_RING_FULL;
    } else {
        /* production is not committed, caller needs to explicitly commit */
        rtn = ring->write;
    }
    return rtn;
}

/**
 * bcm_ring_prod - Fetch and "commit" the next index where a ring element may
 *                 be produced.
 * @ring: pointer to a ring context
 * @ring_size: size of the ring
 */
static INLINE int
bcm_ring_prod(bcm_ring_t *ring, const int ring_size)
{
    int next_write, prod_write;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));

    next_write = __bcm_ring_next_write(ring, ring_size);
    if (__bcm_ring_full(ring, next_write)) {
        prod_write = BCM_RING_FULL;
    } else {
        prod_write = ring->write;
        bcm_ring_prod_done(ring, next_write); /* "commit" production */
    }
    return prod_write;
}

/**
 * bcm_ring_cons_done - commit a previously pending read
 * @ring: pointer to a ring context
 * @read: index up to which elements have been consumed.
 */
static INLINE void
bcm_ring_cons_done(bcm_ring_t *ring, int read)
{
    RING_ASSERT(BCM_RING_IS_VALID(ring));
    ring->read = read;
}

/**
 * bcm_ring_cons_pend - fetch in "pend" mode, the next index where a ring
 *                      element may be consumed.
 * @ring: pointer to a ring context
 * @pend_read: next index, after the returned index
 * @ring_size: size of the ring
 */
static INLINE int
bcm_ring_cons_pend(const bcm_ring_t *ring, int *pend_read, const int ring_size)
{
    int rtn;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    if (bcm_ring_is_empty(ring)) {
        *pend_read = BCM_RING_EMPTY;
        rtn = BCM_RING_EMPTY;
    } else {
        *pend_read = (ring->read + 1) % ring_size;
        /* consumption is not committed, caller needs to explicitly commit */
        rtn = ring->read;
    }
    return rtn;
}

/**
 * bcm_ring_cons - fetch and "commit" the next index where a ring element may
 *                 be consumed.
 * @ring: pointer to a ring context
 * @ring_size: size of the ring
 */
static INLINE int
bcm_ring_cons(bcm_ring_t *ring, const int ring_size)
{
    int cons_read;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    if (bcm_ring_is_empty(ring)) {
        cons_read = BCM_RING_EMPTY;
    } else {
        cons_read = ring->read;
        ring->read = (ring->read + 1) % ring_size; /* read is committed */
    }
    return cons_read;
}

/**
 * bcm_ring_sync_read - on consumption, update peer's read index.
 * @peer: pointer to peer's producer ring context
 * @self: pointer to consumer's ring context
 */
static INLINE void
bcm_ring_sync_read(bcm_ring_t *peer, const bcm_ring_t *self)
{
    RING_ASSERT(BCM_RING_IS_VALID(peer));
    RING_ASSERT(BCM_RING_IS_VALID(self));
    peer->read = self->read; /* flush read update to peer producer */
}

/**
 * bcm_ring_sync_write - on production, update peer's write index.
 * @peer: pointer to peer's consumer ring context
 * @self: pointer to producer's ring context
 */
static INLINE void
bcm_ring_sync_write(bcm_ring_t *peer, const bcm_ring_t *self)
{
    RING_ASSERT(BCM_RING_IS_VALID(peer));
    RING_ASSERT(BCM_RING_IS_VALID(self));
    peer->write = self->write; /* flush write update to peer consumer */
}

/**
 * bcm_ring_prod_avail - fetch total number of available empty slots in the
 *                       ring for production.
 * @ring: pointer to a ring context
 * @ring_size: size of the ring
 */
static INLINE int
bcm_ring_prod_avail(const bcm_ring_t *ring, const int ring_size)
{
    int prod_avail;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    if (ring->write >= ring->read) {
        prod_avail = (ring_size - (ring->write - ring->read) - 1);
    } else {
        prod_avail = (ring->read - (ring->write + 1));
    }
    ASSERT(prod_avail < ring_size);
    return prod_avail;
}

/**
 * bcm_ring_cons_avail - fetch total number of available elements for consumption.
 * @ring: pointer to a ring context
 * @ring_size: size of the ring
 */
static INLINE int
bcm_ring_cons_avail(const bcm_ring_t *ring, const int ring_size)
{
    int cons_avail;
    RING_ASSERT(BCM_RING_IS_VALID(ring) && BCM_RING_SIZE_IS_VALID(ring_size));
    if (ring->read == ring->write) {
        cons_avail = 0;
    } else if (ring->read > ring->write) {
        cons_avail = ((ring_size - ring->read) + ring->write);
    } else {
        cons_avail = ring->write - ring->read;
    }
    ASSERT(cons_avail < ring_size);
    return cons_avail;
}
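
/*
 * Worked example (illustrative): with ring_size = 8, read = 5 and write = 2,
 * the occupied slots are 5, 6, 7, 0 and 1, so
 *   bcm_ring_cons_avail() = (8 - 5) + 2 = 5 elements ready for consumption, and
 *   bcm_ring_prod_avail() = 5 - (2 + 1) = 2 free slots; one of the 8 slots is
 *   always kept empty to distinguish a full ring from an empty one.
 */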

/**
 * bcm_ring_cons_all - set ring in state where all elements are consumed.
 * @ring: pointer to a ring context
 */
static INLINE void
bcm_ring_cons_all(bcm_ring_t *ring)
{
    ring->read = ring->write;
}

/**
 * Work Queue
 * A work queue is composed of a ring of work items, of a specified depth.
 * It HAS-A bcm_ring object, comprising a RD and WR offset, to implement a
 * producer/consumer circular ring.
 */

struct bcm_workq {
    bcm_ring_t ring;        /* Ring context abstraction */
    struct bcm_workq *peer; /* Peer workq context */
    void *buffer;           /* Buffer storage for work items in workQ */
    int ring_size;          /* Depth of workQ */
} __ring_aligned;

typedef struct bcm_workq bcm_workq_t;

/* #define BCM_WORKQ_DEBUG */
#if defined(BCM_WORKQ_DEBUG)
#define WORKQ_ASSERT(exp) ASSERT(exp)
#else  /* ! BCM_WORKQ_DEBUG */
#define WORKQ_ASSERT(exp) do {} while (0)
#endif /* ! BCM_WORKQ_DEBUG */

#define WORKQ_AUDIT(workq) \
    WORKQ_ASSERT((workq) != BCM_WORKQ_NULL); \
    WORKQ_ASSERT(WORKQ_PEER(workq) != BCM_WORKQ_NULL); \
    WORKQ_ASSERT((workq)->buffer == WORKQ_PEER(workq)->buffer); \
    WORKQ_ASSERT((workq)->ring_size == WORKQ_PEER(workq)->ring_size);

#define BCM_WORKQ_NULL ((bcm_workq_t *)NULL)

#define WORKQ_PEER(workq)      ((workq)->peer)
#define WORKQ_RING(workq)      (&((workq)->ring))
#define WORKQ_PEER_RING(workq) (&((workq)->peer->ring))

#define WORKQ_ELEMENT(__elem_type, __workq, __index) ({ \
    WORKQ_ASSERT((__workq) != BCM_WORKQ_NULL); \
    WORKQ_ASSERT((__index) < ((__workq)->ring_size)); \
    ((__elem_type *)((__workq)->buffer)) + (__index); \
})

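/*
 * Example (illustrative): fetch a pointer to the work item stored at ring
 * index "idx" in a workq; the element type "work_item_t" is hypothetical.
 *
 *   work_item_t *item = WORKQ_ELEMENT(work_item_t, workq, idx);
 */
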
static INLINE void bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
                                  void *buffer, int ring_size);

static INLINE bool bcm_workq_is_empty(const bcm_workq_t *workq_prod);

static INLINE void bcm_workq_prod_sync(bcm_workq_t *workq_prod);
static INLINE void bcm_workq_cons_sync(bcm_workq_t *workq_cons);

static INLINE void bcm_workq_prod_refresh(bcm_workq_t *workq_prod);
static INLINE void bcm_workq_cons_refresh(bcm_workq_t *workq_cons);

/**
 * bcm_workq_init - initialize a workq and its peer
 * @workq: pointer to a workq context
 * @workq_peer: pointer to the peer workq context
 * @buffer: pointer to a pre-allocated circular buffer to serve as a ring
 * @ring_size: size of the ring in terms of max number of elements.
 */
static INLINE void
bcm_workq_init(bcm_workq_t *workq, bcm_workq_t *workq_peer,
               void *buffer, int ring_size)
{
    ASSERT(workq != BCM_WORKQ_NULL);
    ASSERT(workq_peer != BCM_WORKQ_NULL);
    ASSERT(buffer != NULL);
    ASSERT(ring_size > 0);

    WORKQ_PEER(workq) = workq_peer;
    WORKQ_PEER(workq_peer) = workq;

    bcm_ring_init(WORKQ_RING(workq));
    bcm_ring_init(WORKQ_RING(workq_peer));

    workq->buffer = workq_peer->buffer = buffer;
    workq->ring_size = workq_peer->ring_size = ring_size;
}

/**
 * bcm_workq_is_empty - test whether there is work
 * @workq_prod: producer's workq
 */
static INLINE bool
bcm_workq_is_empty(const bcm_workq_t *workq_prod)
{
    return bcm_ring_is_empty(WORKQ_RING(workq_prod));
}

/**
 * bcm_workq_prod_sync - Commit the producer write index to peer workq's ring
 * @workq_prod: producer's workq whose write index must be synced to peer
 */
static INLINE void
bcm_workq_prod_sync(bcm_workq_t *workq_prod)
{
    WORKQ_AUDIT(workq_prod);

    /* cons::write <--- prod::write */
    bcm_ring_sync_write(WORKQ_PEER_RING(workq_prod), WORKQ_RING(workq_prod));
}

/**
 * bcm_workq_cons_sync - Commit the consumer read index to the peer workq's ring
 * @workq_cons: consumer's workq whose read index must be synced to peer
 */
static INLINE void
bcm_workq_cons_sync(bcm_workq_t *workq_cons)
{
    WORKQ_AUDIT(workq_cons);

    /* prod::read <--- cons::read */
    bcm_ring_sync_read(WORKQ_PEER_RING(workq_cons), WORKQ_RING(workq_cons));
}

/**
 * bcm_workq_prod_refresh - Fetch the updated consumer's read index
 * @workq_prod: producer's workq whose read index must be refreshed from peer
 */
static INLINE void
bcm_workq_prod_refresh(bcm_workq_t *workq_prod)
{
    WORKQ_AUDIT(workq_prod);

    /* prod::read <--- cons::read */
    bcm_ring_sync_read(WORKQ_RING(workq_prod), WORKQ_PEER_RING(workq_prod));
}

/**
 * bcm_workq_cons_refresh - Fetch the updated producer's write index
 * @workq_cons: consumer's workq whose write index must be refreshed from peer
 */
static INLINE void
bcm_workq_cons_refresh(bcm_workq_t *workq_cons)
{
    WORKQ_AUDIT(workq_cons);

    /* cons::write <--- prod::write */
    bcm_ring_sync_write(WORKQ_RING(workq_cons), WORKQ_PEER_RING(workq_cons));
}

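/*
 * A minimal end-to-end sketch (illustrative only): "work_item_t", "wi_prod",
 * "wi_cons" and "WI_DEPTH" are hypothetical and assume a prior
 * bcm_workq_init(&wi_prod, &wi_cons, wi_buf, WI_DEPTH).
 *
 * Producer side:
 *   int w, next_w;
 *   bcm_workq_prod_refresh(&wi_prod);                 // pull consumer's READ index
 *   w = bcm_ring_prod_pend(WORKQ_RING(&wi_prod), &next_w, WI_DEPTH);
 *   if (w != BCM_RING_FULL) {
 *       work_item_t *item = WORKQ_ELEMENT(work_item_t, &wi_prod, w);
 *       // ... fill *item ...
 *       bcm_ring_prod_done(WORKQ_RING(&wi_prod), next_w); // commit locally
 *       bcm_workq_prod_sync(&wi_prod);                // flush WRITE index to consumer
 *   }
 *
 * Consumer side:
 *   int r;
 *   bcm_workq_cons_refresh(&wi_cons);                 // pull producer's WRITE index
 *   while ((r = bcm_ring_cons(WORKQ_RING(&wi_cons), WI_DEPTH)) != BCM_RING_EMPTY) {
 *       work_item_t *item = WORKQ_ELEMENT(work_item_t, &wi_cons, r);
 *       // ... process *item ...
 *   }
 *   bcm_workq_cons_sync(&wi_cons);                    // flush READ index to producer
 */
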
#endif /* ! __bcm_ring_included__ */