xref: /OK3568_Linux_fs/external/rkwifibt/drivers/infineon/hnd_pktpool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * HND generic packet pool operation primitives
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $
 */

#include <typedefs.h>
#include <osl.h>
#include <osl_ext.h>
#include <bcmutils.h>
#include <hnd_pktpool.h>
#ifdef BCMRESVFRAGPOOL
#include <hnd_resvpool.h>
#endif /* BCMRESVFRAGPOOL */
#ifdef BCMFRWDPOOLREORG
#include <hnd_poolreorg.h>
#endif /* BCMFRWDPOOLREORG */

/* mutex macros for thread safety */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	osl_ext_mutex_release(mutex)
#else
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	OSL_EXT_SUCCESS
#endif /* HND_PKTPOOL_THREAD_SAFE */

/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID				(0U)
#define PKTPOOLREG_RSVD_PTR				(POOLPTR(0xdeaddead))
#define PKTPOOLREG_FREE_PTR				(POOLPTR(NULL))

#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))

/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id)		\
		PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id)		\
		(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))

/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV()			\
		PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD()		\
		(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))

/* Walk all un-reserved entries in registry */
#define PKTPOOL_REGISTRY_FOREACH(id)	\
		for ((id) = 1U; (id) <= pktpools_max; (id)++)

enum pktpool_empty_cb_state {
	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
};

uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */

/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);

/** add declaration */
static void pktpool_avail_notify(pktpool_t *pktp);

/** accessor functions required when ROMming this file, forced into RAM */

pktpool_t *
BCMRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}

static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}

static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}

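/*
 * Illustrative consistency check (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; neither the flag nor the helper is part of this
 * driver): a registered pool's id indexes its own registry slot, so looking
 * the id up via the RAM accessor above must yield the pool pointer that
 * POOLID() was taken from.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static bool
pktpool_registry_selfcheck_example(pktpool_t *pp)
{
	return (get_pktpools_registry((int)POOLID(pp)) == pp);
}
#endif /* HND_PKTPOOL_EXAMPLES */
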
/** Constructs a pool registry to serve a maximum of total_pools */
int
pktpool_attach(osl_t *osh, uint32 total_pools)
{
	uint32 poolid;
	BCM_REFERENCE(osh);

	if (pktpools_max != 0U) {
		return BCME_ERROR;
	}

	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);

	/* Initialize registry: reserve slot#0 and tag others as free */
	PKTPOOL_REGISTRY_RSV();		/* reserve slot#0 */

	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
		PKTPOOL_REGISTRY_CLR(poolid);
	}

	pktpools_max = total_pools;

	return (int)pktpools_max;
}

/** Destructs the pool registry. Ascertains that all pools were first de-inited */
int
pktpool_dettach(osl_t *osh)
{
	uint32 poolid;
	BCM_REFERENCE(osh);

	if (pktpools_max == 0U) {
		return BCME_OK;
	}

	/* Ascertain that no pools are still registered */
	ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */

	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
	}

	pktpools_max = 0U; /* restore boot state */

	return BCME_OK;
}

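/*
 * Illustrative boot-time sketch (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; not part of this driver): the registry must be
 * constructed once with pktpool_attach() before any pktpool_init() call, and
 * torn down with pktpool_dettach() only after every pool has been de-inited.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static int
pktpool_registry_lifecycle_example(osl_t *osh)
{
	if (pktpool_attach(osh, PKTPOOL_MAXIMUM_ID) != PKTPOOL_MAXIMUM_ID) {
		return BCME_ERROR;	/* already attached, or invalid pool count */
	}

	/* ... pktpool_init()/pktpool_deinit() pairs would go here ... */

	return pktpool_dettach(osh);	/* asserts that every slot is free again */
}
#endif /* HND_PKTPOOL_EXAMPLES */
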
/** Registers a pool in a free slot; returns the registry slot index */
static int
pktpool_register(pktpool_t * poolptr)
{
	uint32 poolid;

	if (pktpools_max == 0U) {
		return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
	}

	ASSERT(pktpools_max != 0U);

	/* find an empty slot in pktpools_registry */
	PKTPOOL_REGISTRY_FOREACH(poolid) {
		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
			return (int)poolid; /* return pool ID */
		}
	} /* FOREACH */

	return PKTPOOL_INVALID_ID;	/* error: registry is full */
}

/** Deregisters a pktpool, given the pool pointer; tag slot as free */
static int
pktpool_deregister(pktpool_t * poolptr)
{
	uint32 poolid;

	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));

	poolid = POOLID(poolptr);
	ASSERT(poolid <= pktpools_max);

	/* Ascertain that a previously registered poolptr is being de-registered */
	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
	} else {
		ASSERT(0);
		return BCME_ERROR; /* mismatch in registry */
	}

	return BCME_OK;
}

/**
 * pktpool_init:
 * The caller provides a pktpool_t structure and specifies the number of packets
 * to be pre-filled into the pool (n_pkts).
 * pktpool_init first attempts to register the pool and fetch a unique poolid.
 * If registration fails, BCME_ERROR is returned, either because the registry was
 * not pre-created (pktpool_attach) or because the registry is full.
 * If registration succeeds, the requested number of packets is filled into the
 * pool as part of initialization. If there is not enough memory to service the
 * request, BCME_NOMEM is returned along with the count of packets that were
 * successfully allocated.
 * In dongle builds, prior to memory reclamation, one should limit the number of
 * packets allocated during pktpool_init and fill the pool up after the reclaim
 * stage.
 *
 * @param n_pkts          Number of packets to be pre-filled into the pool
 * @param max_pkt_bytes   The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
 * @param type            e.g. 'lbuf_frag'
 */
int
pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx,
	uint8 type)
{
	int i, err = BCME_OK;
	int pktplen;
	uint8 pktp_id;

	ASSERT(pktp != NULL);
	ASSERT(osh != NULL);
	ASSERT(n_pkts != NULL);

	pktplen = *n_pkts;

	bzero(pktp, sizeof(pktpool_t));

	/* assign a unique pktpool id */
	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
		return BCME_ERROR;
	}
	POOLSETID(pktp, pktp_id);

	pktp->inited = TRUE;
	pktp->istx = istx ? TRUE : FALSE;
	pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
	pktp->type = type;

	if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	pktp->maxlen = PKTPOOL_LEN_MAX;
	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);

	for (i = 0; i < pktplen; i++) {
		void *p;
		p = PKTGET(osh, max_pkt_bytes, TRUE);

		if (p == NULL) {
			/* Not able to allocate all requested pkts,
			 * so just return what was actually allocated.
			 * We can add to the pool later.
			 */
			if (pktp->freelist == NULL) /* pktpool free list is empty */
				err = BCME_NOMEM;

			goto exit;
		}

		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */

		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
		pktp->freelist = p;

		pktp->avail++;

#ifdef BCMDBG_POOL
		pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif /* BCMDBG_POOL */
	}

exit:
	pktp->n_pkts = pktp->avail;

	*n_pkts = pktp->n_pkts; /* number of packets managed by pool */
	return err;
} /* pktpool_init */

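/*
 * Illustrative usage sketch (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; the request count is made up): pktpool_init() may
 * legitimately return BCME_NOMEM with a partially filled pool. Callers accept
 * the partial fill, since n_pkts reports what was actually allocated, and top
 * the pool up later with pktpool_fill(), e.g. after the reclaim stage.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static int
pktpool_init_example(osl_t *osh, pktpool_t *pktp)
{
	int n_pkts = 32;	/* requested pre-fill; platform dependent */
	int err;

	err = pktpool_init(osh, pktp, &n_pkts, PKTBUFSZ, FALSE, lbuf_basic);
	if (err == BCME_ERROR) {
		return err;	/* registry missing or full: pool is unusable */
	}

	/* n_pkts now holds the number of packets actually managed by the pool */
	if (err == BCME_NOMEM) {
		err = pktpool_fill(osh, pktp, FALSE);	/* retry once memory is back */
	}

	return err;
}
#endif /* HND_PKTPOOL_EXAMPLES */
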
/**
 * pktpool_deinit:
 * Prior to freeing a pktpool, all packets must first be freed back into the pool.
 * Upon pktpool_deinit, all packets in the free pool are freed to the heap.
 * An assert is in place to ensure that no packets are still lingering around.
 * Packets freed to a pool after the deinit will cause memory corruption, as the
 * pktpool_t structure no longer exists.
 */
int
pktpool_deinit(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif /* BCMDBG_POOL */

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->n_pkts == 0);

	return 0;
}

int
pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
{
	void *p;
	int err = 0;
	int n_pkts, psize, maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(pktp->max_pkt_bytes != 0);

	maxlen = pktp->maxlen;
	psize = minimal ? (maxlen >> 2) : maxlen;
	for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) {

		p = PKTGET(osh, pktp->max_pkt_bytes, TRUE);

		if (p == NULL) {
			err = BCME_NOMEM;
			break;
		}

		if (pktpool_add(pktp, p) != BCME_OK) {
			PKTFREE(osh, p, FALSE);
			err = BCME_ERROR;
			break;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (pktp->cbcnt) {
		if (pktp->empty == FALSE)
			pktpool_avail_notify(pktp);
	}

	return err;
}

#ifdef BCMPOOLRECLAIM
/* Removes up to free_cnt packets from the pool and frees them to the heap,
 * without de-initializing the pool.
 */
uint16
pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt)
{
	uint16 freed = 0;

	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return freed;
	}

	if (pktp->avail < free_cnt) {
		free_cnt = pktp->avail;
	}

	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If pool is shared rx frag pool, use call back fn to reclaim host address
		 * and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);

		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;

	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
		 * associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while ((pktp->freelist != NULL) && (free_cnt)) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		if (cb != NULL) {
			if (cb(pktp, arg, p, REMOVE_RXCPLID)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		free_cnt--;
	}

	pktp->avail -= freed;

	pktp->n_pkts -= freed;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return freed;
	}

	return freed;
}
#endif /* #ifdef BCMPOOLRECLAIM */

/* Frees all packets from the pool back to the heap, without de-initializing the
 * pool.
 * NOTE: the caller must ensure that all packets have been returned to the pool
 * before calling; any packet still outstanding is leaked.
 */
int
pktpool_empty(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif /* BCMDBG_POOL */

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	ASSERT(pktp->n_pkts == 0);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

static void *
pktpool_deq(pktpool_t *pktp)
{
	void *p = NULL;

	if (pktp->avail == 0)
		return NULL;

	ASSERT(pktp->freelist != NULL);

	p = pktp->freelist;  /* dequeue packet from head of pktpool free list */
	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */

	PKTSETFREELIST(p, NULL);

	pktp->avail--;

	return p;
}

static void
pktpool_enq(pktpool_t *pktp, void *p)
{
	ASSERT(p != NULL);

	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
	pktp->freelist = p; /* free list points to newly inserted packet */

	pktp->avail++;
	ASSERT(pktp->avail <= pktp->n_pkts);
}

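/*
 * Illustrative invariant check (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; not part of this driver): the free list is a LIFO
 * stack threaded through the packets via PKTFREELIST()/PKTSETFREELIST(), and
 * 'avail' always equals the number of packets reachable from 'freelist'.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static bool
pktpool_freelist_consistent_example(pktpool_t *pktp)
{
	uint16 cnt = 0;
	void *p;

	for (p = pktp->freelist; p != NULL; p = PKTFREELIST(p)) {
		cnt++;
	}

	return (cnt == pktp->avail);
}
#endif /* HND_PKTPOOL_EXAMPLES */
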
/** utility for registering host addr fill function called from pciedev */
int
/* BCMATTACHFN */
(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	ASSERT(pktp->cbext.cb == NULL);
	pktp->cbext.cb = cb;
	pktp->cbext.arg = arg;
	return 0;
}

int
pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	if (pktp == NULL)
		return BCME_ERROR;
	ASSERT(pktp->rxcplidfn.cb == NULL);
	pktp->rxcplidfn.cb = cb;
	pktp->rxcplidfn.arg = arg;
	return 0;
}

/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
void
pktpool_invoke_dmarxfill(pktpool_t *pktp)
{
	ASSERT(pktp->dmarxfill.cb);
	ASSERT(pktp->dmarxfill.arg);

	if (pktp->dmarxfill.cb)
		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
}

/** Registers callback functions for split rx mode */
int
pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{

	ASSERT(cb != NULL);

	pktp->dmarxfill.cb = cb;
	pktp->dmarxfill.arg = arg;

	return 0;
}

/**
 * Registers callback functions.
 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
 */
int
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt++;
			goto done;
		}
	}

	i = pktp->cbcnt;
	if (i == PKTPOOL_CB_MAX_AVL) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->cbs[i].cb == NULL);
	pktp->cbs[i].cb = cb;
	pktp->cbs[i].arg = arg;
	pktp->cbs[i].refcnt++;
	pktp->cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

/* No BCMATTACHFN as it is used in a non-attach function */
int
pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i, k;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt--;
			if (pktp->cbs[i].refcnt) {
				/* There are still references to this callback */
				goto done;
			}
			/* Move the remaining callbacks down to fill the hole */
			for (k = i+1; k < pktp->cbcnt; i++, k++) {
				pktp->cbs[i].cb = pktp->cbs[k].cb;
				pktp->cbs[i].arg = pktp->cbs[k].arg;
				pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
			}

			/* reset the last callback */
			pktp->cbs[i].cb = NULL;
			pktp->cbs[i].arg = NULL;
			pktp->cbs[i].refcnt = 0;

			pktp->cbcnt--;
			goto done;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	return err;
}

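/*
 * Illustrative sketch (hypothetical callback and helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; assumes pktpool_cb_t is void (*)(pktpool_t *,
 * void *), matching the call sites in this file): avail callbacks are
 * reference counted, so registering the same (cb, arg) pair twice requires two
 * matching deregister calls before the slot is actually released.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static void
pktpool_example_avail_cb(pktpool_t *pktp, void *arg)
{
	BCM_REFERENCE(pktp);
	BCM_REFERENCE(arg);
	/* e.g. kick a DMA refill here */
}

static int
pktpool_avail_cb_example(pktpool_t *pktp, void *ctx)
{
	int err;

	err = pktpool_avail_register(pktp, pktpool_example_avail_cb, ctx);
	if (err != BCME_OK) {
		return err;	/* all PKTPOOL_CB_MAX_AVL slots already taken */
	}

	/* ... the callback now fires from pktpool_free()/pktpool_fill() ... */

	return pktpool_avail_deregister(pktp, pktpool_example_avail_cb, ctx);
}
#endif /* HND_PKTPOOL_EXAMPLES */
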
/** Registers callback functions */
int
pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	i = pktp->ecbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->ecbs[i].cb == NULL);
	pktp->ecbs[i].cb = cb;
	pktp->ecbs[i].arg = arg;
	pktp->ecbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

/** Calls registered callback functions */
static int
pktpool_empty_notify(pktpool_t *pktp)
{
	int i;

	pktp->empty = TRUE;
	for (i = 0; i < pktp->ecbcnt; i++) {
		ASSERT(pktp->ecbs[i].cb != NULL);
		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
	}
	pktp->empty = FALSE;

	return 0;
}

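/*
 * Illustrative empty-callback sketch (hypothetical callback behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; assumes the same pktpool_cb_t signature as above):
 * empty callbacks run from pktpool_get() when the free list is exhausted and
 * are expected to return reclaimable packets to the pool, e.g. by processing
 * tx completions, before pktpool_get() retries the dequeue.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static void
pktpool_example_empty_cb(pktpool_t *pktp, void *arg)
{
	BCM_REFERENCE(pktp);
	BCM_REFERENCE(arg);
	/* e.g. free completed tx packets so they land back in this pool */
}

static int
pktpool_empty_cb_example(pktpool_t *pktp, void *ctx)
{
	return pktpool_empty_register(pktp, pktpool_example_empty_cb, ctx);
}
#endif /* HND_PKTPOOL_EXAMPLES */
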
#ifdef BCMDBG_POOL
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb);

	i = pktp->dbg_cbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->dbg_cbs[i].cb == NULL);
	pktp->dbg_cbs[i].cb = cb;
	pktp->dbg_cbs[i].arg = arg;
	pktp->dbg_cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

int pktpool_dbg_notify(pktpool_t *pktp);

int
pktpool_dbg_notify(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	for (i = 0; i < pktp->dbg_cbcnt; i++) {
		ASSERT(pktp->dbg_cbs[i].cb);
		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_dbg_dump(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	printf("pool len=%d maxlen=%d\n",  pktp->dbg_qlen, pktp->maxlen);
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p);
		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
{
	int i;
	int state;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	bzero(stats, sizeof(pktpool_stats_t));
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
		switch (state) {
			case POOL_TXENQ:
				stats->enq++; break;
			case POOL_TXDH:
				stats->txdh++; break;
			case POOL_TXD11:
				stats->txd11++; break;
			case POOL_RXDH:
				stats->rxdh++; break;
			case POOL_RXD11:
				stats->rxd11++; break;
			case POOL_RXFILL:
				stats->rxfill++; break;
			case POOL_IDLE:
				stats->idle++; break;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_start_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			pktp->dbg_q[i].cycles = cycles;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int pktpool_stop_trigger(pktpool_t *pktp, void *p);

int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
#endif /* BCMDBG_POOL */

int
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
	BCM_REFERENCE(osh);
	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->availcb_excl = NULL;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
{
	int i;
	int err;
	BCM_REFERENCE(osh);

	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(pktp->availcb_excl == NULL);
	for (i = 0; i < pktp->cbcnt; i++) {
		if (cb == pktp->cbs[i].cb) {
			pktp->availcb_excl = &pktp->cbs[i];
			break;
		}
	}

	if (pktp->availcb_excl == NULL)
		err = BCME_ERROR;
	else
		err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

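/*
 * Illustrative sketch (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag; reuses the example avail callback from the
 * earlier sketch): a callback that was registered via pktpool_avail_register()
 * can temporarily be made the exclusive consumer of availability
 * notifications, and round-robin delivery is restored with
 * pktpool_avail_notify_normal().
 */
#ifdef HND_PKTPOOL_EXAMPLES
static int
pktpool_exclusive_notify_example(osl_t *osh, pktpool_t *pktp)
{
	int err;

	err = pktpool_avail_notify_exclusive(osh, pktp, pktpool_example_avail_cb);
	if (err != BCME_OK) {
		return err;	/* callback was never registered on this pool */
	}

	/* ... only pktpool_example_avail_cb is notified while exclusive ... */

	return pktpool_avail_notify_normal(osh, pktp);
}
#endif /* HND_PKTPOOL_EXAMPLES */
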
static void
pktpool_avail_notify(pktpool_t *pktp)
{
	int i, k, idx;
	int avail;

	ASSERT(pktp);
	if (pktp->availcb_excl != NULL) {
		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
		return;
	}

	k = pktp->cbcnt - 1;
	for (i = 0; i < pktp->cbcnt; i++) {
		avail = pktp->avail;

		if (avail) {
			if (pktp->cbtoggle)
				idx = i;
			else
				idx = k--;

			ASSERT(pktp->cbs[idx].cb != NULL);
			pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
		}
	}

	/* Alternate between filling from head or tail */
	pktp->cbtoggle ^= 1;

	return;
}

/** Gets an empty packet from the caller provided pool */
void *
pktpool_get(pktpool_t *pktp)
{
	void *p;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return NULL;

	p = pktpool_deq(pktp);

	if (p == NULL) {
		/* Notify and try to reclaim tx pkts */
		if (pktp->ecbcnt)
			pktpool_empty_notify(pktp);

		p = pktpool_deq(pktp);
		if (p == NULL)
			goto done;
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return NULL;

	return p;
}

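/*
 * Illustrative get/free sketch (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag): pktpool_get() can return NULL even after the
 * empty-notify callbacks had a chance to return packets, so callers must
 * always handle allocation failure; every packet obtained here must eventually
 * go back through pktpool_free().
 */
#ifdef HND_PKTPOOL_EXAMPLES
static int
pktpool_get_example(pktpool_t *pktp)
{
	void *p = pktpool_get(pktp);

	if (p == NULL) {
		return BCME_NOMEM;	/* pool exhausted; retry later */
	}

	/* ... use the packet, e.g. hand it to a DMA ring ... */

	pktpool_free(pktp, p);		/* return it to the pool when done */

	return BCME_OK;
}
#endif /* HND_PKTPOOL_EXAMPLES */
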
void
pktpool_free(pktpool_t *pktp, void *p)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return;

	ASSERT(p != NULL);
#ifdef BCMDBG_POOL
	/* pktpool_stop_trigger(pktp, p); */
#endif /* BCMDBG_POOL */

	pktpool_enq(pktp, p);

	/**
	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
	 * If any avail callback functions are registered, send a notification
	 * that a new packet is available in the pool.
	 */
	if (pktp->cbcnt) {
		/* To use cpu cycles more efficiently, callbacks can be temporarily disabled.
		 * This allows feeding the consumers on a burst basis rather than an
		 * inefficient per-packet basis.
		 */
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/**
			 * If the call originated from pktpool_empty_notify, the just freed packet
			 * is needed in pktpool_get.
			 * Therefore don't call pktpool_avail_notify.
			 */
			if (pktp->empty == FALSE)
				pktpool_avail_notify(pktp);
		} else {
			/**
			 * The callback is temporarily disabled; record that a packet has been
			 * freed so the notification can be delivered when it is re-enabled.
			 */
			pktp->emptycb_disable = EMPTYCB_SKIPPED;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return;
}

/** Adds a caller provided (empty) packet to the caller provided pool */
int
pktpool_add(pktpool_t *pktp, void *p)
{
	int err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(p != NULL);

	if (pktp->n_pkts == pktp->maxlen) {
		err = BCME_RANGE;
		goto done;
	}

	/* pkts in pool have same length */
	ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
	PKTSETPOOL(OSH_NULL, p, TRUE, pktp);

	pktp->n_pkts++;
	pktpool_enq(pktp, p);

#ifdef BCMDBG_POOL
	pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif /* BCMDBG_POOL */

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

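/*
 * Illustrative growth sketch (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag): the pool is grown one packet at a time with
 * pktpool_add(); the packet must match the pool's fixed packet size and the
 * pool must still be below maxlen, otherwise the caller keeps ownership and
 * frees the packet back to the heap (this mirrors the pktpool_fill() loop).
 */
#ifdef HND_PKTPOOL_EXAMPLES
static int
pktpool_grow_by_one_example(osl_t *osh, pktpool_t *pktp)
{
	void *p = PKTGET(osh, pktp->max_pkt_bytes, TRUE);
	int err;

	if (p == NULL) {
		return BCME_NOMEM;
	}

	err = pktpool_add(pktp, p);
	if (err != BCME_OK) {
		PKTFREE(osh, p, FALSE);	/* pool full (BCME_RANGE) or locked */
	}

	return err;
}
#endif /* HND_PKTPOOL_EXAMPLES */
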
/**
 * Force pktpool_setmaxlen() into RAM as it uses a constant
 * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
 */
int
BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (maxlen > PKTPOOL_LEN_MAX)
		maxlen = PKTPOOL_LEN_MAX;

	/* if pool is already beyond maxlen, then just cap it
	 * since we currently do not reduce the pool len
	 * already allocated
	 */
	pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return pktp->maxlen;
}

void
pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
{
	ASSERT(pktp);

	/**
	 * To use cpu cycles more efficiently, callbacks can be temporarily disabled.
	 * If the callback is going to be re-enabled, check whether any packet got
	 * freed and added back to the pool while the callback was disabled.
	 * When this is the case do the callback now, provided that callback functions
	 * are registered and this call did not originate from pktpool_empty_notify.
	 */
	if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
		(pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
			pktpool_avail_notify(pktp);
	}

	/* Enable or temporarily disable callback when packet becomes available. */
	pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
}

bool
pktpool_emptycb_disabled(pktpool_t *pktp)
{
	ASSERT(pktp);
	return pktp->emptycb_disable != EMPTYCB_ENABLED;
}

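/*
 * Illustrative burst-free sketch (hypothetical helper behind an imagined
 * HND_PKTPOOL_EXAMPLES flag): when many packets are freed back-to-back, the
 * avail notification can be batched by disabling the callback around the
 * burst; re-enabling it delivers a single deferred notification if any packet
 * was freed in the meantime.
 */
#ifdef HND_PKTPOOL_EXAMPLES
static void
pktpool_burst_free_example(pktpool_t *pktp, void **pkts, int npkts)
{
	int i;

	pktpool_emptycb_disable(pktp, TRUE);	/* defer per-packet notifications */

	for (i = 0; i < npkts; i++) {
		pktpool_free(pktp, pkts[i]);
	}

	pktpool_emptycb_disable(pktp, FALSE);	/* one notification for the burst */
}
#endif /* HND_PKTPOOL_EXAMPLES */
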
1193*4882a593Smuzhiyun #ifdef BCMPKTPOOL
1194*4882a593Smuzhiyun #include <hnd_lbuf.h>
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun pktpool_t *pktpool_shared = NULL;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun #ifdef BCMFRAGPOOL
1199*4882a593Smuzhiyun pktpool_t *pktpool_shared_lfrag = NULL;
1200*4882a593Smuzhiyun #ifdef BCMRESVFRAGPOOL
1201*4882a593Smuzhiyun pktpool_t *pktpool_resv_lfrag = NULL;
1202*4882a593Smuzhiyun struct resv_info *resv_pool_info = NULL;
1203*4882a593Smuzhiyun #endif /* BCMRESVFRAGPOOL */
1204*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun pktpool_t *pktpool_shared_rxlfrag = NULL;
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun static osl_t *pktpool_osh = NULL;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun /**
1211*4882a593Smuzhiyun  * Initializes several packet pools and allocates packets within those pools.
1212*4882a593Smuzhiyun  */
1213*4882a593Smuzhiyun int
1214*4882a593Smuzhiyun hnd_pktpool_init(osl_t *osh)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun 	int err = BCME_OK;
1217*4882a593Smuzhiyun 	int n;
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/* Construct a packet pool registry before initializing packet pools */
1220*4882a593Smuzhiyun 	n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
1221*4882a593Smuzhiyun 	if (n != PKTPOOL_MAXIMUM_ID) {
1222*4882a593Smuzhiyun 		ASSERT(0);
1223*4882a593Smuzhiyun 		err = BCME_ERROR;
1224*4882a593Smuzhiyun 		goto error0;
1225*4882a593Smuzhiyun 	}
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
1228*4882a593Smuzhiyun 	if (pktpool_shared == NULL) {
1229*4882a593Smuzhiyun 		ASSERT(0);
1230*4882a593Smuzhiyun 		err = BCME_NOMEM;
1231*4882a593Smuzhiyun 		goto error1;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1235*4882a593Smuzhiyun 	pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
1236*4882a593Smuzhiyun 	if (pktpool_shared_lfrag == NULL) {
1237*4882a593Smuzhiyun 		ASSERT(0);
1238*4882a593Smuzhiyun 		err = BCME_NOMEM;
1239*4882a593Smuzhiyun 		goto error2;
1240*4882a593Smuzhiyun 	}
1241*4882a593Smuzhiyun #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1242*4882a593Smuzhiyun 	resv_pool_info = hnd_resv_pool_alloc(osh);
1243*4882a593Smuzhiyun 	if (resv_pool_info == NULL) {
1244*4882a593Smuzhiyun 		ASSERT(0);
1245*4882a593Smuzhiyun 		goto error2;
1246*4882a593Smuzhiyun 	}
1247*4882a593Smuzhiyun 	pktpool_resv_lfrag = resv_pool_info->pktp;
1248*4882a593Smuzhiyun 	if (pktpool_resv_lfrag == NULL) {
1249*4882a593Smuzhiyun 		ASSERT(0);
1250*4882a593Smuzhiyun 		goto error2;
1251*4882a593Smuzhiyun 	}
1252*4882a593Smuzhiyun #endif	/* RESVFRAGPOOL */
1253*4882a593Smuzhiyun #endif /* FRAGPOOL */
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1256*4882a593Smuzhiyun 	pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
1257*4882a593Smuzhiyun 	if (pktpool_shared_rxlfrag == NULL) {
1258*4882a593Smuzhiyun 		ASSERT(0);
1259*4882a593Smuzhiyun 		err = BCME_NOMEM;
1260*4882a593Smuzhiyun 		goto error3;
1261*4882a593Smuzhiyun 	}
1262*4882a593Smuzhiyun #endif // endif
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	/*
1265*4882a593Smuzhiyun 	 * At this early stage there is not enough memory to allocate all of the
1266*4882a593Smuzhiyun 	 * requested packets in the shared pool; the pool must be topped up
1267*4882a593Smuzhiyun 	 * after reclaim.
1268*4882a593Smuzhiyun 	 *
1269*4882a593Smuzhiyun 	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
1270*4882a593Smuzhiyun 	 *
1271*4882a593Smuzhiyun 	 * Initialization of a packet pool may fail (BCME_ERROR) if the packet pool
1272*4882a593Smuzhiyun 	 * registry is not initialized or the registry is depleted.
1273*4882a593Smuzhiyun 	 *
1274*4882a593Smuzhiyun 	 * A BCME_NOMEM error only indicates that the requested number of packets
1275*4882a593Smuzhiyun 	 * could not be placed into the pool (a caller sketch follows this function).
1276*4882a593Smuzhiyun 	 */
1277*4882a593Smuzhiyun 	n = 1;
1278*4882a593Smuzhiyun 	MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
1279*4882a593Smuzhiyun 	if ((err = pktpool_init(osh, pktpool_shared,
1280*4882a593Smuzhiyun 	                        &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
1281*4882a593Smuzhiyun 		ASSERT(0);
1282*4882a593Smuzhiyun 		goto error4;
1283*4882a593Smuzhiyun 	}
1284*4882a593Smuzhiyun 	pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1287*4882a593Smuzhiyun 	n = 1;
1288*4882a593Smuzhiyun 	if ((err = pktpool_init(osh, pktpool_shared_lfrag,
1289*4882a593Smuzhiyun 	                        &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) {
1290*4882a593Smuzhiyun 		ASSERT(0);
1291*4882a593Smuzhiyun 		goto error5;
1292*4882a593Smuzhiyun 	}
1293*4882a593Smuzhiyun 	pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
1294*4882a593Smuzhiyun #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1295*4882a593Smuzhiyun 	n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
1296*4882a593Smuzhiyun 	if (pktpool_init(osh, pktpool_resv_lfrag,
1297*4882a593Smuzhiyun 	                 &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) {
1298*4882a593Smuzhiyun 		ASSERT(0);
1299*4882a593Smuzhiyun 		goto error5;
1300*4882a593Smuzhiyun 	}
1301*4882a593Smuzhiyun 	pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
1302*4882a593Smuzhiyun #endif /* RESVFRAGPOOL */
1303*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1304*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1305*4882a593Smuzhiyun 	n = 1;
1306*4882a593Smuzhiyun 	if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
1307*4882a593Smuzhiyun 	                        &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) {
1308*4882a593Smuzhiyun 		ASSERT(0);
1309*4882a593Smuzhiyun 		goto error6;
1310*4882a593Smuzhiyun 	}
1311*4882a593Smuzhiyun 	pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
1312*4882a593Smuzhiyun #endif // endif
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1315*4882a593Smuzhiyun 	/* Attach poolreorg module */
1316*4882a593Smuzhiyun 	if ((frwd_poolreorg_info = poolreorg_attach(osh,
1317*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1318*4882a593Smuzhiyun 			pktpool_shared_lfrag,
1319*4882a593Smuzhiyun #else
1320*4882a593Smuzhiyun 			NULL,
1321*4882a593Smuzhiyun #endif // endif
1322*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1323*4882a593Smuzhiyun 			pktpool_shared_rxlfrag,
1324*4882a593Smuzhiyun #else
1325*4882a593Smuzhiyun 			NULL,
1326*4882a593Smuzhiyun #endif // endif
1327*4882a593Smuzhiyun 			pktpool_shared)) == NULL) {
1328*4882a593Smuzhiyun 		ASSERT(0);
1329*4882a593Smuzhiyun 		goto error7;
1330*4882a593Smuzhiyun 	}
1331*4882a593Smuzhiyun #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	pktpool_osh = osh;
1334*4882a593Smuzhiyun 	MALLOC_CLEAR_NOPERSIST(osh);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	return BCME_OK;
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1339*4882a593Smuzhiyun 	/* detach poolreorg module */
1340*4882a593Smuzhiyun 	poolreorg_detach(frwd_poolreorg_info);
1341*4882a593Smuzhiyun error7:
1342*4882a593Smuzhiyun #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1345*4882a593Smuzhiyun 	pktpool_deinit(osh, pktpool_shared_rxlfrag);
1346*4882a593Smuzhiyun error6:
1347*4882a593Smuzhiyun #endif // endif
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1350*4882a593Smuzhiyun 	pktpool_deinit(osh, pktpool_shared_lfrag);
1351*4882a593Smuzhiyun error5:
1352*4882a593Smuzhiyun #endif // endif
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun #if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
1355*4882a593Smuzhiyun 	(defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
1356*4882a593Smuzhiyun 	pktpool_deinit(osh, pktpool_shared);
1357*4882a593Smuzhiyun #endif // endif
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun error4:
1360*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1361*4882a593Smuzhiyun 	hnd_free(pktpool_shared_rxlfrag);
1362*4882a593Smuzhiyun 	pktpool_shared_rxlfrag = (pktpool_t *)NULL;
1363*4882a593Smuzhiyun error3:
1364*4882a593Smuzhiyun #endif /* BCMRXFRAGPOOL */
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1367*4882a593Smuzhiyun 	hnd_free(pktpool_shared_lfrag);
1368*4882a593Smuzhiyun 	pktpool_shared_lfrag = (pktpool_t *)NULL;
1369*4882a593Smuzhiyun error2:
1370*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	hnd_free(pktpool_shared);
1373*4882a593Smuzhiyun 	pktpool_shared = (pktpool_t *)NULL;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun error1:
1376*4882a593Smuzhiyun 	pktpool_dettach(osh);
1377*4882a593Smuzhiyun error0:
1378*4882a593Smuzhiyun 	MALLOC_CLEAR_NOPERSIST(osh);
1379*4882a593Smuzhiyun 	return err;
1380*4882a593Smuzhiyun } /* hnd_pktpool_init */
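
/*
 * Illustrative caller sketch (not part of the driver; compiled out via
 * "#if 0"), referenced from the comment inside hnd_pktpool_init() above:
 * BCME_ERROR from pktpool_init() is fatal (the pool registry is missing or
 * depleted), whereas BCME_NOMEM only means the pool could not yet be filled
 * to the requested level and can be topped up later, e.g. via
 * hnd_pktpool_fill() or hnd_pktpool_refill(). The pool pointer and the
 * requested count below are hypothetical.
 */
#if 0
static int
example_bringup_pool(osl_t *osh, pktpool_t *pktp)
{
	int n = 1;	/* start small; the pool grows after reclaim */
	int err;

	err = pktpool_init(osh, pktp, &n, PKTBUFSZ, FALSE, lbuf_basic);
	if (err == BCME_ERROR)
		return err;	/* registry problem: give up */

	/* BCME_NOMEM is tolerated here; the pool is refilled after reclaim. */
	(void)pktpool_setmaxlen(pktp, SHARED_POOL_LEN);
	return BCME_OK;
}
#endif /* 0 */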
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun /** is called at each 'wl up' */
1383*4882a593Smuzhiyun int
1384*4882a593Smuzhiyun hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
1385*4882a593Smuzhiyun {
1386*4882a593Smuzhiyun 	return (pktpool_fill(pktpool_osh, pktpool, minimal));
1387*4882a593Smuzhiyun }
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun /** refills pktpools after reclaim, is called once */
1390*4882a593Smuzhiyun void
1391*4882a593Smuzhiyun hnd_pktpool_refill(bool minimal)
1392*4882a593Smuzhiyun {
1393*4882a593Smuzhiyun 	if (POOL_ENAB(pktpool_shared)) {
1394*4882a593Smuzhiyun #if defined(SRMEM)
1395*4882a593Smuzhiyun 		if (SRMEM_ENAB()) {
1396*4882a593Smuzhiyun 			int maxlen = pktpool_max_pkts(pktpool_shared);
1397*4882a593Smuzhiyun 			int n_pkts = pktpool_tot_pkts(pktpool_shared);
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 			for (; n_pkts < maxlen; n_pkts++) {
1400*4882a593Smuzhiyun 				void *p;
1401*4882a593Smuzhiyun 				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
1402*4882a593Smuzhiyun 					break;
1403*4882a593Smuzhiyun 				pktpool_add(pktpool_shared, p);
1404*4882a593Smuzhiyun 			}
1405*4882a593Smuzhiyun 		}
1406*4882a593Smuzhiyun #endif /* SRMEM */
1407*4882a593Smuzhiyun 		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
1408*4882a593Smuzhiyun 	}
1409*4882a593Smuzhiyun /* fragpool reclaim */
1410*4882a593Smuzhiyun #ifdef BCMFRAGPOOL
1411*4882a593Smuzhiyun 	if (POOL_ENAB(pktpool_shared_lfrag)) {
1412*4882a593Smuzhiyun 		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
1413*4882a593Smuzhiyun 	}
1414*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1415*4882a593Smuzhiyun /* rx fragpool reclaim */
1416*4882a593Smuzhiyun #ifdef BCMRXFRAGPOOL
1417*4882a593Smuzhiyun 	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
1418*4882a593Smuzhiyun 		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
1419*4882a593Smuzhiyun 	}
1420*4882a593Smuzhiyun #endif // endif
1421*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
1422*4882a593Smuzhiyun 	if (POOL_ENAB(pktpool_resv_lfrag)) {
1423*4882a593Smuzhiyun 		int resv_size = (PKTFRAGSZ + LBUFFRAGSZ)*RESV_FRAG_POOL_LEN;
1424*4882a593Smuzhiyun 		hnd_resv_pool_init(resv_pool_info, resv_size);
1425*4882a593Smuzhiyun 		hnd_resv_pool_enable(resv_pool_info);
1426*4882a593Smuzhiyun 	}
1427*4882a593Smuzhiyun #endif /* BCMRESVFRAGPOOL */
1428*4882a593Smuzhiyun }
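
/*
 * Illustrative sketch (not part of the driver; compiled out via "#if 0"):
 * the generic "top up to maxlen" pattern used by the SRMEM branch in
 * hnd_pktpool_refill() above -- keep adding buffers from an alternate
 * allocator until the pool reaches its configured maximum or the allocator
 * runs dry. The allocator callback type and its name are hypothetical.
 */
#if 0
typedef void *(*example_alloc_fn_t)(int bytes);

static void
example_top_up_pool(pktpool_t *pktp, example_alloc_fn_t alloc_fn)
{
	int maxlen = pktpool_max_pkts(pktp);
	int n_pkts = pktpool_tot_pkts(pktp);

	for (; n_pkts < maxlen; n_pkts++) {
		void *p = alloc_fn(pktpool_max_pkt_bytes(pktp));
		if (p == NULL)
			break;	/* alternate allocator exhausted */
		pktpool_add(pktp, p);
	}
}
#endif /* 0 */
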
1429*4882a593Smuzhiyun #endif /* BCMPKTPOOL */
1430