/*
 * HND generic packet pool operation primitives
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#include <typedefs.h>
#include <osl.h>
#include <osl_ext.h>
#include <bcmutils.h>
#include <wlioctl.h>
#include <hnd_pktpool.h>
#ifdef BCMRESVFRAGPOOL
#include <hnd_resvpool.h>
#endif /* BCMRESVFRAGPOOL */
#ifdef BCMFRWDPOOLREORG
#include <hnd_poolreorg.h>
#endif /* BCMFRWDPOOLREORG */

#if defined(DONGLEBUILD) && defined(SRMEM)
#include <hndsrmem.h>
#endif /* DONGLEBUILD && SRMEM */
#if defined(DONGLEBUILD)
#include <d11_cfg.h>
#endif

/* mutex macros for thread safety */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	osl_ext_mutex_release(mutex)
#else
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	OSL_EXT_SUCCESS
#endif

/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID		(0U)
#define PKTPOOLREG_RSVD_PTR		(POOLPTR(0xdeaddead))
#define PKTPOOLREG_FREE_PTR		(POOLPTR(NULL))

#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))

/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id) \
	PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id) \
	(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))

/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV() \
	PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD() \
	(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))

/* Walk all un-reserved entries in registry */
#define PKTPOOL_REGISTRY_FOREACH(id) \
	for ((id) = 1U; (id) <= pktpools_max; (id)++)

enum pktpool_empty_cb_state {
	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
};

uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */

/* Number of pktids currently reserved for pktpool usage.
 * It is initialized to the maximum number of pktids reserved for pktpools.
 * pktpool_init, pktpool_fill and pktpool_refill decrement it;
 * pktpool_reclaim, pktpool_empty and heap_pkt_release increment it.
 */
#ifdef DONGLEBUILD
uint32 total_pool_pktid_count = PKTID_POOL;
#else
uint32 total_pool_pktid_count = 0U;
#endif /* DONGLEBUILD */

#ifdef POOL_HEAP_RECONFIG
typedef struct pktpool_heap_cb_reg {
	pktpool_heap_cb_t fn;
	void *ctxt;
	uint32 flag;
} pktpool_heap_cb_reg_t;
#define PKTPOOL_MAX_HEAP_CB 2
pktpool_heap_cb_reg_t pktpool_heap_cb_reg[PKTPOOL_MAX_HEAP_CB];
uint32 pktpool_heap_rel_active = 0U;

static void hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag);
static void hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag);
static int hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize);
static void hnd_pktpool_lbuf_free_cb(uint8 poolid);
static pktpool_heap_cb_reg_t *BCMRAMFN(hnd_pool_get_cb_registry)(void);
#endif /* POOL_HEAP_RECONFIG */

/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);

/** forward declaration */
static void pktpool_avail_notify(pktpool_t *pktp);

/** accessor functions required when ROMming this file, forced into RAM */

pktpool_t *
BCMPOSTTRAPRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}

static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}

static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}

/** Constructs a pool registry to serve a maximum of total_pools */
int
BCMATTACHFN(pktpool_attach)(osl_t *osh, uint32 total_pools)
{
	uint32 poolid;
	BCM_REFERENCE(osh);

	if (pktpools_max != 0U) {
		return BCME_ERROR;
	}

	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);

	/* Initialize registry: reserve slot#0 and tag others as free */
	PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */

	PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
		PKTPOOL_REGISTRY_CLR(poolid);
	}

	pktpools_max = total_pools;

	return (int)pktpools_max;
}

/** Destructs the pool registry. Ascertain all pools were first de-inited */
int
BCMATTACHFN(pktpool_dettach)(osl_t *osh)
{
	uint32 poolid;
	BCM_REFERENCE(osh);

	if (pktpools_max == 0U) {
		return BCME_OK;
	}

	/* Ascertain that no pools are still registered */
	ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */

	PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
	}

	pktpools_max = 0U; /* restore boot state */

	return BCME_OK;
}

/** Registers a pool in a free slot; returns the registry slot index */
static int
BCMATTACHFN(pktpool_register)(pktpool_t * poolptr)
{
	uint32 poolid;

	if (pktpools_max == 0U) {
		return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
	}

	ASSERT(pktpools_max != 0U);

	/* find an empty slot in pktpools_registry */
	PKTPOOL_REGISTRY_FOREACH(poolid) {
		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
			return (int)poolid; /* return pool ID */
		}
	} /* FOREACH */

	return PKTPOOL_INVALID_ID; /* error: registry is full */
}

/** Deregisters a pktpool, given the pool pointer; tag slot as free */
static int
BCMATTACHFN(pktpool_deregister)(pktpool_t * poolptr)
{
	uint32 poolid;

	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));

	poolid = POOLID(poolptr);
	ASSERT(poolid <= pktpools_max);

	/* Ascertain that a previously registered poolptr is being de-registered */
	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
	} else {
		ASSERT(0);
		return BCME_ERROR; /* mismatch in registry */
	}

	return BCME_OK;
}

/**
 * pktpool_init:
 * The caller provides a pktpool_t structure and specifies the number of packets
 * to be pre-filled into the pool (n_pkts).
 * pktpool_init first attempts to register the pool and fetch a unique poolid.
 * If registration fails, BCME_ERROR is returned; this happens when the registry
 * was not pre-created (pktpool_attach) or the registry is full.
 * If registration succeeds, the requested number of packets is filled into the
 * pool as part of initialization. If there is not enough memory to service the
 * request, BCME_NOMEM is returned along with the count of how many packets were
 * successfully allocated.
 * In dongle builds, prior to memory reclamation, one should limit the number of
 * packets allocated during pktpool_init and fill the pool up after the reclaim
 * stage. An illustrative usage sketch follows the function body below.
 *
 * @param n_pkts         Number of packets to be pre-filled into the pool
 * @param max_pkt_bytes  The size of all packets in a pool must be the same, e.g. PKTBUFSZ.
 * @param type           e.g. 'lbuf_frag'
 */
int
BCMATTACHFN(pktpool_init)(osl_t *osh,
	pktpool_t *pktp,
	int *n_pkts,
	int max_pkt_bytes,
	bool istx,
	uint8 type,
	bool is_heap_pool,
	uint32 heap_pool_flag,
	uint16 min_backup_buf)
{
	int i, err = BCME_OK;
	int pktplen;
	uint8 pktp_id;

	ASSERT(pktp != NULL);
	ASSERT(osh != NULL);
	ASSERT(n_pkts != NULL);

	pktplen = *n_pkts;

	bzero(pktp, sizeof(pktpool_t));

	/* assign a unique pktpool id */
	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
		return BCME_ERROR;
	}
	POOLSETID(pktp, pktp_id);

	pktp->inited = TRUE;
	pktp->istx = istx ? TRUE : FALSE;
	pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
	pktp->type = type;

#ifdef POOL_HEAP_RECONFIG
	pktp->poolheap_flag = heap_pool_flag;
	pktp->poolheap_count = 0;
	pktp->min_backup_buf = min_backup_buf;
	if (is_heap_pool) {
		if (rte_freelist_mgr_register(&pktp->mem_handle,
			hnd_pktpool_heap_get_cb,
			lb_get_pktalloclen(type, max_pkt_bytes),
			pktp) != BCME_OK) {
			return BCME_ERROR;
		}
	}
	pktp->is_heap_pool = is_heap_pool;
#endif
	if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	pktp->maxlen = PKTPOOL_LEN_MAX;
	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);

	for (i = 0; i < pktplen; i++) {
		void *p;
#ifdef _RTE_
		/* For rte builds, use PKTALLOC rather than PKTGET.
		 * This avoids the same pkts being dequeued and enqueued to the pool
		 * when allocation fails.
		 */
		p = PKTALLOC(osh, max_pkt_bytes, type);
#else
		p = PKTGET(osh, max_pkt_bytes, TRUE);
#endif

		if (p == NULL) {
			/* Not able to allocate all requested pkts,
			 * so just return what was actually allocated.
			 * We can add to the pool later.
			 */
			if (pktp->freelist == NULL) /* pktpool free list is empty */
				err = BCME_NOMEM;

			goto exit;
		}

		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */

		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
		pktp->freelist = p;

		pktp->avail++;

		ASSERT(total_pool_pktid_count > 0);
		total_pool_pktid_count--;

#ifdef BCMDBG_POOL
		pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif
	}

exit:
	pktp->n_pkts = pktp->avail;

	*n_pkts = pktp->n_pkts; /* number of packets managed by pool */
	return err;
} /* pktpool_init */
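
/*
 * Illustrative usage sketch (not part of the driver): the calls below show the
 * typical attach -> init -> get/free -> deinit -> detach sequence for a pool of
 * PKTBUFSZ-sized lbuf_basic packets. The osh handle, the registry size of 1, the
 * packet count of 32 and the error handling are assumptions made for this example
 * only; real callers run the attach/init steps from attach context.
 *
 *	static pktpool_t example_pool;
 *
 *	int example_pktpool_usage(osl_t *osh)
 *	{
 *		int n_pkts = 32;	// returns the number actually pre-filled
 *		void *p;
 *
 *		if (pktpool_attach(osh, 1) < 0)		// construct registry for one pool
 *			return BCME_ERROR;
 *
 *		if (pktpool_init(osh, &example_pool, &n_pkts, PKTBUFSZ, FALSE,
 *			lbuf_basic, FALSE, 0, 0) != BCME_OK)
 *			return BCME_ERROR;
 *
 *		p = pktpool_get_ext(&example_pool, lbuf_basic, NULL); // draw one packet
 *		if (p != NULL)
 *			PKTFREE(osh, p, FALSE);	// pool-tagged pkts return to the pool on free
 *
 *		pktpool_deinit(osh, &example_pool); // all pkts must be back in the pool first
 *		return pktpool_dettach(osh);
 *	}
 */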

/**
 * pktpool_deinit:
 * Prior to freeing a pktpool, all packets must first be freed back into the pktpool.
 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
 * An assert is in place to ensure that there are no packets still lingering
 * around. Packets freed to a pool after the deinit will cause a memory
 * corruption as the pktpool_t structure no longer exists.
 */
int
BCMATTACHFN(pktpool_deinit)(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->n_pkts == 0);

	return 0;
}

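/**
 * Tops up the pool with freshly allocated packets. When 'minimal' is TRUE only a
 * quarter of the pool's maxlen is targeted, otherwise the pool is filled up to
 * maxlen. Registered availability callbacks are notified afterwards unless an
 * empty-notification is already in progress.
 */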
int
pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
{
	void *p;
	int err = 0;
	int n_pkts, psize, maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

#ifdef BCMRXDATAPOOL
	ASSERT((pktp->max_pkt_bytes != 0) || (pktp->type == lbuf_rxfrag));
#else
	ASSERT(pktp->max_pkt_bytes != 0);
#endif /* BCMRXDATAPOOL */

	maxlen = pktp->maxlen;
	psize = minimal ? (maxlen >> 2) : maxlen;
	n_pkts = (int)pktp->n_pkts;
#ifdef POOL_HEAP_RECONFIG
	/*
	 * Consider the packets released to the freelist mgr
	 * as part of the pool size as well
	 */
	n_pkts += pktp->is_heap_pool ?
		pktp->poolheap_count : 0;
#endif
	for (; n_pkts < psize; n_pkts++) {

#ifdef _RTE_
		/* For rte builds, use PKTALLOC rather than PKTGET.
		 * This avoids the same pkts being dequeued and enqueued to the pool
		 * when allocation fails. All pkts in a pool have the same length.
		 */
		p = PKTALLOC(osh, pktp->max_pkt_bytes, pktp->type);
#else
		p = PKTGET(osh, pktp->n_pkts, TRUE);
#endif

		if (p == NULL) {
			err = BCME_NOMEM;
			break;
		}

		if (pktpool_add(pktp, p) != BCME_OK) {
			PKTFREE(osh, p, FALSE);
			err = BCME_ERROR;
			break;
		}
		ASSERT(total_pool_pktid_count > 0);
		total_pool_pktid_count--;
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (pktp->cbcnt) {
		if (pktp->empty == FALSE)
			pktpool_avail_notify(pktp);
	}

	return err;
}

#ifdef BCMPOOLRECLAIM
/* New API to remove packets from the pool without deinitializing it */
uint16
pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt, uint8 action)
{
	uint16 freed = 0;

	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;
	void *rem_list_head = NULL;
	void *rem_list_tail = NULL;
	bool dont_free = FALSE;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return freed;
	}

	if (pktp->avail < free_cnt) {
		free_cnt = pktp->avail;
	}

	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If the pool is the shared rx frag pool, use the callback fn to reclaim
		 * the host address and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);

		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;

	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If the pool is the shared rx pool, use the callback fn to free the Rx
		 * cpl ID associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while ((pktp->freelist != NULL) && (free_cnt)) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		dont_free = FALSE;

		if (action == FREE_ALL_FRAG_PKTS) {
			/* Free lbufs which are marked as frag_free_mem */
			if (!PKTISFRMFRAG(p)) {
				dont_free = TRUE;
			}
		}

		if (dont_free) {
			if (rem_list_head == NULL) {
				rem_list_head = p;
			} else {
				PKTSETFREELIST(rem_list_tail, p);
			}
			rem_list_tail = p;
			continue;
		}
		if (cb != NULL) {
			if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		pktp->avail--;
		pktp->n_pkts--;

		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		free_cnt--;
	}

	if (rem_list_head) {
		PKTSETFREELIST(rem_list_tail, pktp->freelist);
		pktp->freelist = rem_list_head;
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return freed;
	}

	return freed;
}
#endif /* BCMPOOLRECLAIM */

/* New API to empty all packets from the pool without deinitializing it.
 * NOTE: the caller is responsible for ensuring that all pkts are back in the
 * pool's free list before the call; otherwise those pkts will leak!
 */
int
pktpool_empty(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	ASSERT(pktp->n_pkts == 0);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

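/** Returns the number of packets currently available in the pool; re-enables the
 * empty callback when the pool has been drained.
 */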
int
BCMPOSTTRAPFN(pktpool_avail)(pktpool_t *pktpool)
{
	int avail = pktpool->avail;

	if (avail == 0) {
		pktpool_emptycb_disable(pktpool, FALSE);
	}

	return avail;
}

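/** Dequeues a packet from the head of the pool's free list; returns NULL when the
 * pool is empty.
 */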
static void *
BCMPOSTTRAPFASTPATH(pktpool_deq)(pktpool_t *pktp)
{
	void *p = NULL;

	if (pktp->avail == 0)
		return NULL;

	ASSERT_FP(pktp->freelist != NULL);

	p = pktp->freelist; /* dequeue packet from head of pktpool free list */
	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */

#if defined(DONGLEBUILD) && defined(SRMEM)
	if (SRMEM_ENAB()) {
		PKTSRMEM_INC_INUSE(p);
	}
#endif /* DONGLEBUILD && SRMEM */

	PKTSETFREELIST(p, NULL);

	pktp->avail--;

	return p;
}

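/** Enqueues a packet at the head of the pool's free list and bumps the available
 * count, which must never exceed the number of packets owned by the pool.
 */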
static void
BCMPOSTTRAPFASTPATH(pktpool_enq)(pktpool_t *pktp, void *p)
{
	ASSERT_FP(p != NULL);

	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
	pktp->freelist = p; /* free list points to newly inserted packet */

#if defined(DONGLEBUILD) && defined(SRMEM)
	if (SRMEM_ENAB()) {
		PKTSRMEM_DEC_INUSE(p);
	}
#endif /* DONGLEBUILD && SRMEM */

	pktp->avail++;
	ASSERT_FP(pktp->avail <= pktp->n_pkts);
}

/** utility for registering host addr fill function called from pciedev */
int
BCMATTACHFN(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	ASSERT(pktp->cbext.cb == NULL);
	pktp->cbext.cb = cb;
	pktp->cbext.arg = arg;
	return 0;
}

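/** utility for registering the Rx completion ID fill/free callback used for
 * packets drawn from (or reclaimed back into) a shared rx pool
 */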
int
BCMATTACHFN(pktpool_rxcplid_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	if (pktp == NULL)
		return BCME_ERROR;
	ASSERT(pktp->rxcplidfn.cb == NULL);
	pktp->rxcplidfn.cb = cb;
	pktp->rxcplidfn.arg = arg;
	return 0;
}

/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
void
pktpool_invoke_dmarxfill(pktpool_t *pktp)
{
	ASSERT(pktp->dmarxfill.cb);
	ASSERT(pktp->dmarxfill.arg);

	if (pktp->dmarxfill.cb)
		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
}

/** Registers callback functions for split rx mode */
int
BCMATTACHFN(pkpool_haddr_avail_register_cb)(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{

	ASSERT(cb != NULL);

	pktp->dmarxfill.cb = cb;
	pktp->dmarxfill.arg = arg;

	return 0;
}

/**
 * Registers callback functions.
 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
 */
int
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt++;
			goto done;
		}
	}

	i = pktp->cbcnt;
	if (i == PKTPOOL_CB_MAX_AVL) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->cbs[i].cb == NULL);
	pktp->cbs[i].cb = cb;
	pktp->cbs[i].arg = arg;
	pktp->cbs[i].refcnt++;
	pktp->cbcnt++;

	/* force enable empty callback */
	pktpool_emptycb_disable(pktp, FALSE);
done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

/* No BCMATTACHFN as it is used in a non-attach function */
int
pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i, k;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt--;
			if (pktp->cbs[i].refcnt) {
				/* There are still references to this callback */
				goto done;
			}
			/* Move the remaining callbacks up to fill the hole */
			for (k = i+1; k < pktp->cbcnt; i++, k++) {
				pktp->cbs[i].cb = pktp->cbs[k].cb;
				pktp->cbs[i].arg = pktp->cbs[k].arg;
				pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
			}

			/* reset the last callback */
			pktp->cbs[i].cb = NULL;
			pktp->cbs[i].arg = NULL;
			pktp->cbs[i].refcnt = 0;

			pktp->cbcnt--;
			goto done;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	return err;
}

/** Registers an 'empty' callback, invoked when a get request cannot be fully
 * satisfied from the pool
 */
int
BCMATTACHFN(pktpool_empty_register)(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	i = pktp->ecbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->ecbs[i].cb == NULL);
	pktp->ecbs[i].cb = cb;
	pktp->ecbs[i].arg = arg;
	pktp->ecbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
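
/*
 * Illustrative sketch (not part of the driver): a minimal 'empty' callback as it
 * might be registered with pktpool_empty_register(). Such a callback runs from
 * pktpool_empty_notify() when a get request cannot be satisfied, so a typical
 * implementation releases locally cached or queued packets back to the pool.
 * The example_ctx structure and example_cache_flush() helper are assumptions
 * made for this example only.
 *
 *	static void example_pool_empty_cb(pktpool_t *pool, void *arg)
 *	{
 *		struct example_ctx *ctx = (struct example_ctx *)arg;
 *
 *		// Return any packets this module is holding so the pool refills
 *		example_cache_flush(ctx, pool);
 *	}
 *
 *	// registration, normally done at attach time:
 *	// pktpool_empty_register(pktp, example_pool_empty_cb, ctx);
 */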

/** Calls registered callback functions */
static int
BCMPOSTTRAPFN(pktpool_empty_notify)(pktpool_t *pktp)
{
	int i;

	pktp->empty = TRUE;
	for (i = 0; i < pktp->ecbcnt; i++) {
		ASSERT(pktp->ecbs[i].cb != NULL);
		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
	}
	pktp->empty = FALSE;

	return 0;
}

#ifdef BCMDBG_POOL
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb);

	i = pktp->dbg_cbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->dbg_cbs[i].cb == NULL);
	pktp->dbg_cbs[i].cb = cb;
	pktp->dbg_cbs[i].arg = arg;
	pktp->dbg_cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

int pktpool_dbg_notify(pktpool_t *pktp);

int
pktpool_dbg_notify(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	for (i = 0; i < pktp->dbg_cbcnt; i++) {
		ASSERT(pktp->dbg_cbs[i].cb);
		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_dbg_dump(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p);
		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
{
	int i;
	int state;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	bzero(stats, sizeof(pktpool_stats_t));
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
		switch (state) {
			case POOL_TXENQ:
				stats->enq++; break;
			case POOL_TXDH:
				stats->txdh++; break;
			case POOL_TXD11:
				stats->txd11++; break;
			case POOL_RXDH:
				stats->rxdh++; break;
			case POOL_RXD11:
				stats->rxd11++; break;
			case POOL_RXFILL:
				stats->rxfill++; break;
			case POOL_IDLE:
				stats->idle++; break;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_start_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			pktp->dbg_q[i].cycles = cycles;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int pktpool_stop_trigger(pktpool_t *pktp, void *p);

int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
#endif /* BCMDBG_POOL */

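/** Clears any exclusive availability-callback selection so that all registered
 * callbacks are notified again.
 */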
int
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
	BCM_REFERENCE(osh);
	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->availcb_excl = NULL;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

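/** Restricts availability notifications to the single already-registered callback
 * 'cb'; returns BCME_ERROR if that callback was not previously registered.
 */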
int
pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
{
	int i;
	int err;
	BCM_REFERENCE(osh);

	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(pktp->availcb_excl == NULL);
	for (i = 0; i < pktp->cbcnt; i++) {
		if (cb == pktp->cbs[i].cb) {
			pktp->availcb_excl = &pktp->cbs[i];
			break;
		}
	}

	if (pktp->availcb_excl == NULL)
		err = BCME_ERROR;
	else
		err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

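/** Invokes the registered availability callbacks. The exclusive callback, if set,
 * is the only one called; otherwise all callbacks run, alternating between forward
 * and reverse order on successive invocations (see cbtoggle below).
 */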
static void
BCMPOSTTRAPFN(pktpool_avail_notify)(pktpool_t *pktp)
{
	int i, k, idx;

	ASSERT(pktp);
	pktpool_emptycb_disable(pktp, TRUE);

	if (pktp->availcb_excl != NULL) {
		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
		return;
	}

	k = pktp->cbcnt - 1;
	for (i = 0; i < pktp->cbcnt; i++) {
		/* Callbacks are disabled on entry to this function.
		 * If avail is, say, 5 and the first callback consumes exactly 5 packets
		 * due to the dma rxpost setting, the remaining callbacks would never be
		 * notified if an avail check were done here. So call all cbs even when
		 * pktp->avail is zero, giving each cb the opportunity to re-enable
		 * callbacks if its operation is still in progress / not completed.
		 */
		if (pktp->cbtoggle)
			idx = i;
		else
			idx = k--;

		ASSERT(pktp->cbs[idx].cb != NULL);
		pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
	}

	/* Alternate between filling from head or tail */
	pktp->cbtoggle ^= 1;

	return;
}

#ifdef APP_RX
/* Update freelist and avail count for a given packet pool */
void
BCMFASTPATH(pktpool_update_freelist)(pktpool_t *pktp, void *p, uint pkts_consumed)
{
	ASSERT_FP(pktp->avail >= pkts_consumed);

	pktp->freelist = p;
	pktp->avail -= pkts_consumed;
}
#endif /* APP_RX */

/** Gets an empty packet from the caller provided pool */
void *
BCMPOSTTRAPFASTPATH(pktpool_get_ext)(pktpool_t *pktp, uint8 type, uint *pktcnt)
{
	void *p = NULL;
	uint pkts_requested = 1;
#if defined(DONGLEBUILD)
	uint pkts_avail;
	bool rxcpl = (pktp->rxcplidfn.cb != NULL) ? TRUE : FALSE;
#endif /* DONGLEBUILD */

	if (pktcnt) {
		pkts_requested = *pktcnt;
		if (pkts_requested == 0) {
			goto done;
		}
	}

#if defined(DONGLEBUILD)
	pkts_avail = pkts_requested;
#endif /* DONGLEBUILD */

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return NULL;

	/* If there are fewer packets in the pool than requested, call
	 * pktpool_empty_notify() to reclaim more pkts.
	 */
	if (pktp->avail < pkts_requested) {
		/* Notify and try to reclaim tx pkts */
		if (pktp->ecbcnt) {
			pktpool_empty_notify(pktp);
		}

		if (pktp->avail < pkts_requested) {
			pktpool_emptycb_disable(pktp, FALSE);
			if (pktp->avail == 0) {
				goto done;
			}
		}
	}

#ifdef APP_RX
	if (pktcnt) {
		p = pktp->freelist;
		if (pktp->avail < pkts_requested) {
			pkts_avail = pktp->avail;
		}

		/* For rx frags in APP, return only the head of the freelist; the caller
		 * operates on it and updates the avail count and freelist pointer using
		 * pktpool_update_freelist().
		 */
		if (BCMSPLITRX_ENAB() && ((type == lbuf_rxfrag) || (type == lbuf_rxdata))) {
			*pktcnt = pkts_avail;
			goto done;
		}
	} else
#endif /* APP_RX */
	{
		ASSERT_FP(pkts_requested == 1);
		p = pktpool_deq(pktp);
	}
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun ASSERT_FP(p);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun #if defined(DONGLEBUILD)
1250*4882a593Smuzhiyun #ifndef APP_RX
1251*4882a593Smuzhiyun if (BCMSPLITRX_ENAB() && (type == lbuf_rxfrag)) {
1252*4882a593Smuzhiyun /* If the pool is the shared rx pool, use the callback fn to populate the
1253*4882a593Smuzhiyun * host address. In the case of APP, the callback may consume fewer packets
1254*4882a593Smuzhiyun * than it was given (e.g. due to a resource crunch); the exact number it
1255*4882a593Smuzhiyun * used is returned through the count argument and the pktpool freelist
1256*4882a593Smuzhiyun * head is updated accordingly.
1257*4882a593Smuzhiyun */
1258*4882a593Smuzhiyun ASSERT_FP(pktp->cbext.cb != NULL);
1259*4882a593Smuzhiyun if (pktp->cbext.cb(pktp, pktp->cbext.arg, p, rxcpl, &pkts_avail)) {
1260*4882a593Smuzhiyun pktpool_enq(pktp, p);
1261*4882a593Smuzhiyun p = NULL;
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun #endif /* APP_RX */
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun if ((type == lbuf_basic) && rxcpl) {
1267*4882a593Smuzhiyun /* If pool is shared rx pool, use call back fn to populate Rx cpl ID */
1268*4882a593Smuzhiyun ASSERT_FP(pktp->rxcplidfn.cb != NULL);
1269*4882a593Smuzhiyun /* If rxcplblock is allocated */
1270*4882a593Smuzhiyun if (pktp->rxcplidfn.cb(pktp, pktp->rxcplidfn.arg, p, TRUE, NULL)) {
1271*4882a593Smuzhiyun pktpool_enq(pktp, p);
1272*4882a593Smuzhiyun p = NULL;
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun #endif /* DONGLEBUILD */
1276*4882a593Smuzhiyun
1277*4882a593Smuzhiyun done:
1278*4882a593Smuzhiyun if ((pktp->avail == 0) && (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
1279*4882a593Smuzhiyun pktp->emptycb_disable = EMPTYCB_DISABLED;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun /* protect shared resource */
1282*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1283*4882a593Smuzhiyun return NULL;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun return p;
1286*4882a593Smuzhiyun }
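
/*
 * Illustrative usage sketch (not compiled, not part of the driver): how an
 * APP_RX rx-frag consumer might take one packet from the freelist head that
 * pktpool_get_ext() returns and then report the new head back with
 * pktpool_update_freelist(). The helper name example_rxfrag_get_one() is an
 * assumption; the unlink mirrors what hnd_pktpool_heap_pkt_release() below
 * does when it pops the freelist head.
 */
#if 0
static void *
example_rxfrag_get_one(pktpool_t *pktp)
{
	uint cnt = 1;
	void *head, *next;

	/* For rxfrag/rxdata pools this returns the freelist head; cnt is
	 * updated to the number of packets actually available.
	 */
	head = pktpool_get_ext(pktp, lbuf_rxfrag, &cnt);
	if (head == NULL || cnt == 0)
		return NULL;

	/* Unlink the head from the freelist */
	next = PKTFREELIST(head);
	PKTSETFREELIST(head, NULL);

	/* Hand the new freelist head back and account for one consumed pkt */
	pktpool_update_freelist(pktp, next, 1);

	return head;
}
#endif /* illustrative example */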
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun void
1289*4882a593Smuzhiyun BCMFASTPATH(pktpool_nfree)(pktpool_t *pktp, void *head, void *tail, uint count)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun #ifdef BCMRXDATAPOOL
1292*4882a593Smuzhiyun void *_head = head;
1293*4882a593Smuzhiyun #endif /* BCMRXDATAPOOL */
1294*4882a593Smuzhiyun
1295*4882a593Smuzhiyun if (count > 1) {
1296*4882a593Smuzhiyun pktp->avail += (count - 1);
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun #ifdef BCMRXDATAPOOL
1299*4882a593Smuzhiyun while (--count) {
1300*4882a593Smuzhiyun _head = PKTLINK(_head);
1301*4882a593Smuzhiyun ASSERT_FP(_head);
1302*4882a593Smuzhiyun pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, _head));
1303*4882a593Smuzhiyun }
1304*4882a593Smuzhiyun #endif /* BCMRXDATAPOOL */
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun PKTSETFREELIST(tail, pktp->freelist);
1307*4882a593Smuzhiyun pktp->freelist = PKTLINK(head);
1308*4882a593Smuzhiyun PKTSETLINK(head, NULL);
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun pktpool_free(pktp, head);
1311*4882a593Smuzhiyun }
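
/*
 * Illustrative sketch (not compiled): returning a small chain of pool packets
 * in a single pktpool_nfree() call. pktpool_nfree() expects head..tail linked
 * with PKTLINK, as the function above shows; the helper name and the way the
 * chain is built here are assumptions for illustration only.
 */
#if 0
static void
example_nfree_chain(pktpool_t *pktp, void *p0, void *p1, void *p2)
{
	/* Build the head..tail chain expected by pktpool_nfree() */
	PKTSETLINK(p0, p1);
	PKTSETLINK(p1, p2);
	PKTSETLINK(p2, NULL);

	/* Frees p1 and p2 onto the freelist, then frees the head p0 */
	pktpool_nfree(pktp, p0, p2, 3);
}
#endif /* illustrative example */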
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun void
1314*4882a593Smuzhiyun BCMPOSTTRAPFASTPATH(pktpool_free)(pktpool_t *pktp, void *p)
1315*4882a593Smuzhiyun {
1316*4882a593Smuzhiyun /* protect shared resource */
1317*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1318*4882a593Smuzhiyun return;
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun ASSERT_FP(p != NULL);
1321*4882a593Smuzhiyun #ifdef BCMDBG_POOL
1322*4882a593Smuzhiyun /* pktpool_stop_trigger(pktp, p); */
1323*4882a593Smuzhiyun #endif
1324*4882a593Smuzhiyun
1325*4882a593Smuzhiyun #ifdef BCMRXDATAPOOL
1326*4882a593Smuzhiyun /* Free rx data buffer to rx data buffer pool */
1327*4882a593Smuzhiyun if (PKT_IS_RX_PKT(OSH_NULL, p)) {
1328*4882a593Smuzhiyun pktpool_t *_pktp = pktpool_shared_rxdata;
1329*4882a593Smuzhiyun if (PKTISRXFRAG(OSH_NULL, p)) {
1330*4882a593Smuzhiyun _pktp->cbext.cb(_pktp, _pktp->cbext.arg, p, REMOVE_RXCPLID, NULL);
1331*4882a593Smuzhiyun PKTRESETRXFRAG(OSH_NULL, p);
1332*4882a593Smuzhiyun }
1333*4882a593Smuzhiyun pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, p));
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun #endif /* BCMRXDATAPOOL */
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun pktpool_enq(pktp, p);
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun /**
1340*4882a593Smuzhiyun * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
1341*4882a593Smuzhiyun * If any avail callback functions are registered, send a notification
1342*4882a593Smuzhiyun * that a new packet is available in the pool.
1343*4882a593Smuzhiyun */
1344*4882a593Smuzhiyun if (pktp->cbcnt) {
1345*4882a593Smuzhiyun /* To use CPU cycles more efficiently, callbacks can be temporarily disabled.
1346*4882a593Smuzhiyun * This allows feeding the DMA on a burst basis rather than on an inefficient
1347*4882a593Smuzhiyun * per-packet basis.
1348*4882a593Smuzhiyun if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
1349*4882a593Smuzhiyun /**
1350*4882a593Smuzhiyun * If the call originated from pktpool_empty_notify, the just freed packet
1351*4882a593Smuzhiyun * is needed in pktpool_get.
1352*4882a593Smuzhiyun * Therefore don't call pktpool_avail_notify.
1353*4882a593Smuzhiyun */
1354*4882a593Smuzhiyun if (pktp->empty == FALSE)
1355*4882a593Smuzhiyun pktpool_avail_notify(pktp);
1356*4882a593Smuzhiyun } else {
1357*4882a593Smuzhiyun /**
1358*4882a593Smuzhiyun * The callback is temporarily disabled; record that a packet has been freed.
1359*4882a593Smuzhiyun */
1360*4882a593Smuzhiyun pktp->emptycb_disable = EMPTYCB_SKIPPED;
1361*4882a593Smuzhiyun }
1362*4882a593Smuzhiyun }
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun /* protect shared resource */
1365*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1366*4882a593Smuzhiyun return;
1367*4882a593Smuzhiyun }
1368*4882a593Smuzhiyun
1369*4882a593Smuzhiyun /** Adds a caller provided (empty) packet to the caller provided pool */
1370*4882a593Smuzhiyun int
1371*4882a593Smuzhiyun pktpool_add(pktpool_t *pktp, void *p)
1372*4882a593Smuzhiyun {
1373*4882a593Smuzhiyun int err = 0;
1374*4882a593Smuzhiyun
1375*4882a593Smuzhiyun /* protect shared resource */
1376*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1377*4882a593Smuzhiyun return BCME_ERROR;
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun ASSERT(p != NULL);
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun if (pktp->n_pkts == pktp->maxlen) {
1382*4882a593Smuzhiyun err = BCME_RANGE;
1383*4882a593Smuzhiyun goto done;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun /* pkts in pool have same length */
1387*4882a593Smuzhiyun ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
1388*4882a593Smuzhiyun PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun pktp->n_pkts++;
1391*4882a593Smuzhiyun pktpool_enq(pktp, p);
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun #ifdef BCMDBG_POOL
1394*4882a593Smuzhiyun pktp->dbg_q[pktp->dbg_qlen++].p = p;
1395*4882a593Smuzhiyun #endif
1396*4882a593Smuzhiyun
1397*4882a593Smuzhiyun done:
1398*4882a593Smuzhiyun /* protect shared resource */
1399*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1400*4882a593Smuzhiyun return BCME_ERROR;
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun return err;
1403*4882a593Smuzhiyun }
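
/*
 * Illustrative sketch (not compiled): growing a pool by one caller-allocated
 * packet. The helper name is an assumption, and the use of the OSL
 * PKTGET(osh, len, send) macro here is also an assumption; the only firm
 * requirement shown above is that the added packet's length matches the
 * pool's max_pkt_bytes.
 */
#if 0
static int
example_pool_grow_one(osl_t *osh, pktpool_t *pktp)
{
	void *p = PKTGET(osh, pktpool_max_pkt_bytes(pktp), FALSE);

	if (p == NULL)
		return BCME_NOMEM;

	/* Returns BCME_RANGE once the pool has reached its maxlen */
	return pktpool_add(pktp, p);
}
#endif /* illustrative example */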
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun /**
1406*4882a593Smuzhiyun * Force pktpool_setmaxlen() into RAM as it uses a constant
1407*4882a593Smuzhiyun * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
1408*4882a593Smuzhiyun */
1409*4882a593Smuzhiyun int
1410*4882a593Smuzhiyun BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun /* protect shared resource */
1413*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1414*4882a593Smuzhiyun return BCME_ERROR;
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun if (maxlen > PKTPOOL_LEN_MAX)
1417*4882a593Smuzhiyun maxlen = PKTPOOL_LEN_MAX;
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun /* If the pool is already beyond maxlen, just cap it there,
1420*4882a593Smuzhiyun * since we currently do not reduce the number of packets
1421*4882a593Smuzhiyun * already allocated.
1422*4882a593Smuzhiyun */
1423*4882a593Smuzhiyun pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;
1424*4882a593Smuzhiyun
1425*4882a593Smuzhiyun /* protect shared resource */
1426*4882a593Smuzhiyun if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1427*4882a593Smuzhiyun return BCME_ERROR;
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun return pktp->maxlen;
1430*4882a593Smuzhiyun }
1431*4882a593Smuzhiyun
1432*4882a593Smuzhiyun void
1433*4882a593Smuzhiyun BCMPOSTTRAPFN(pktpool_emptycb_disable)(pktpool_t *pktp, bool disable)
1434*4882a593Smuzhiyun {
1435*4882a593Smuzhiyun bool notify = FALSE;
1436*4882a593Smuzhiyun ASSERT(pktp);
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun /**
1439*4882a593Smuzhiyun * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
1440*4882a593Smuzhiyun * If callback is going to be re-enabled, check if any packet got
1441*4882a593Smuzhiyun * freed and added back to the pool while callback was disabled.
1442*4882a593Smuzhiyun * When this is the case do the callback now, provided that callback functions
1443*4882a593Smuzhiyun * are registered and this call did not originate from pktpool_empty_notify.
1444*4882a593Smuzhiyun */
1445*4882a593Smuzhiyun if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
1446*4882a593Smuzhiyun (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
1447*4882a593Smuzhiyun notify = TRUE;
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun
1450*4882a593Smuzhiyun /* Enable or temporarily disable callback when packet becomes available. */
1451*4882a593Smuzhiyun if (disable) {
1452*4882a593Smuzhiyun if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
1453*4882a593Smuzhiyun /* Mark disabled only if currently enabled. If the state is
1454*4882a593Smuzhiyun * EMPTYCB_SKIPPED, the callback is already disabled and some pkts
1455*4882a593Smuzhiyun * have been freed since; keep that state so pktpool_avail_notify()
1456*4882a593Smuzhiyun * is still called on re-enable.
1457*4882a593Smuzhiyun */
1458*4882a593Smuzhiyun pktp->emptycb_disable = EMPTYCB_DISABLED;
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun } else {
1461*4882a593Smuzhiyun pktp->emptycb_disable = EMPTYCB_ENABLED;
1462*4882a593Smuzhiyun }
1463*4882a593Smuzhiyun if (notify) {
1464*4882a593Smuzhiyun /* pktpool_emptycb_disable() is called from pktpool_avail_notify() and
1465*4882a593Smuzhiyun * pktp->cbs. To have the result of most recent call, notify after
1466*4882a593Smuzhiyun * emptycb_disable is modified.
1467*4882a593Smuzhiyun * This change also prevents any recursive calls of pktpool_avail_notify()
1468*4882a593Smuzhiyun * from pktp->cbs if pktpool_emptycb_disable() is called from them.
1469*4882a593Smuzhiyun */
1470*4882a593Smuzhiyun pktpool_avail_notify(pktp);
1471*4882a593Smuzhiyun }
1472*4882a593Smuzhiyun }
1473*4882a593Smuzhiyun
1474*4882a593Smuzhiyun bool
1475*4882a593Smuzhiyun pktpool_emptycb_disabled(pktpool_t *pktp)
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun ASSERT(pktp);
1478*4882a593Smuzhiyun return pktp->emptycb_disable != EMPTYCB_ENABLED;
1479*4882a593Smuzhiyun }
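
/*
 * Illustrative sketch (not compiled): the burst-free pattern the comments in
 * pktpool_free() and pktpool_emptycb_disable() describe. Avail callbacks are
 * suppressed while a batch of packets is returned, and re-enabling fires the
 * notification once if any free was skipped. The helper name and arguments
 * are assumptions for illustration only.
 */
#if 0
static void
example_burst_free(pktpool_t *pktp, void **pkts, uint npkts)
{
	uint i;

	/* Suppress per-packet avail callbacks for the duration of the burst */
	pktpool_emptycb_disable(pktp, TRUE);

	for (i = 0; i < npkts; i++)
		pktpool_free(pktp, pkts[i]);

	/* Re-enable; pktpool_avail_notify() runs now if frees were skipped */
	pktpool_emptycb_disable(pktp, FALSE);
}
#endif /* illustrative example */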
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun #ifdef BCMPKTPOOL
1482*4882a593Smuzhiyun #include <hnd_lbuf.h>
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun pktpool_t *pktpool_shared = NULL;
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun #ifdef BCMFRAGPOOL
1487*4882a593Smuzhiyun pktpool_t *pktpool_shared_lfrag = NULL;
1488*4882a593Smuzhiyun #ifdef BCMRESVFRAGPOOL
1489*4882a593Smuzhiyun pktpool_t *pktpool_resv_lfrag = NULL;
1490*4882a593Smuzhiyun struct resv_info *resv_pool_info = NULL;
1491*4882a593Smuzhiyun #endif /* BCMRESVFRAGPOOL */
1492*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun #ifdef BCMALFRAGPOOL
1495*4882a593Smuzhiyun pktpool_t *pktpool_shared_alfrag = NULL;
1496*4882a593Smuzhiyun pktpool_t *pktpool_shared_alfrag_data = NULL;
1497*4882a593Smuzhiyun #endif /* BCMALFRAGPOOL */
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun pktpool_t *pktpool_shared_rxlfrag = NULL;
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun /* Rx data pool w/o rxfrag structure */
1502*4882a593Smuzhiyun pktpool_t *pktpool_shared_rxdata = NULL;
1503*4882a593Smuzhiyun
1504*4882a593Smuzhiyun static osl_t *pktpool_osh = NULL;
1505*4882a593Smuzhiyun
1506*4882a593Smuzhiyun /**
1507*4882a593Smuzhiyun * Initializes several packet pools and allocates packets within those pools.
1508*4882a593Smuzhiyun */
1509*4882a593Smuzhiyun int
1510*4882a593Smuzhiyun BCMATTACHFN(hnd_pktpool_init)(osl_t *osh)
1511*4882a593Smuzhiyun {
1512*4882a593Smuzhiyun int err = BCME_OK;
1513*4882a593Smuzhiyun int n, pktsz;
1514*4882a593Smuzhiyun bool is_heap_pool;
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun BCM_REFERENCE(pktsz);
1517*4882a593Smuzhiyun BCM_REFERENCE(is_heap_pool);
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun /* Construct a packet pool registry before initializing packet pools */
1520*4882a593Smuzhiyun n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
1521*4882a593Smuzhiyun if (n != PKTPOOL_MAXIMUM_ID) {
1522*4882a593Smuzhiyun ASSERT(0);
1523*4882a593Smuzhiyun err = BCME_ERROR;
1524*4882a593Smuzhiyun goto error;
1525*4882a593Smuzhiyun }
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
1528*4882a593Smuzhiyun if (pktpool_shared == NULL) {
1529*4882a593Smuzhiyun ASSERT(0);
1530*4882a593Smuzhiyun err = BCME_NOMEM;
1531*4882a593Smuzhiyun goto error;
1532*4882a593Smuzhiyun }
1533*4882a593Smuzhiyun
1534*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1535*4882a593Smuzhiyun pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
1536*4882a593Smuzhiyun if (pktpool_shared_lfrag == NULL) {
1537*4882a593Smuzhiyun ASSERT(0);
1538*4882a593Smuzhiyun err = BCME_NOMEM;
1539*4882a593Smuzhiyun goto error;
1540*4882a593Smuzhiyun }
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1543*4882a593Smuzhiyun resv_pool_info = hnd_resv_pool_alloc(osh);
1544*4882a593Smuzhiyun if (resv_pool_info == NULL) {
1545*4882a593Smuzhiyun err = BCME_NOMEM;
1546*4882a593Smuzhiyun ASSERT(0);
1547*4882a593Smuzhiyun goto error;
1548*4882a593Smuzhiyun }
1549*4882a593Smuzhiyun pktpool_resv_lfrag = resv_pool_info->pktp;
1550*4882a593Smuzhiyun if (pktpool_resv_lfrag == NULL) {
1551*4882a593Smuzhiyun err = BCME_ERROR;
1552*4882a593Smuzhiyun ASSERT(0);
1553*4882a593Smuzhiyun goto error;
1554*4882a593Smuzhiyun }
1555*4882a593Smuzhiyun #endif /* RESVFRAGPOOL */
1556*4882a593Smuzhiyun #endif /* FRAGPOOL */
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun #if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
1559*4882a593Smuzhiyun pktpool_shared_alfrag = MALLOCZ(osh, sizeof(pktpool_t));
1560*4882a593Smuzhiyun if (pktpool_shared_alfrag == NULL) {
1561*4882a593Smuzhiyun ASSERT(0);
1562*4882a593Smuzhiyun err = BCME_NOMEM;
1563*4882a593Smuzhiyun goto error;
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun pktpool_shared_alfrag_data = MALLOCZ(osh, sizeof(pktpool_t));
1567*4882a593Smuzhiyun if (pktpool_shared_alfrag_data == NULL) {
1568*4882a593Smuzhiyun ASSERT(0);
1569*4882a593Smuzhiyun err = BCME_NOMEM;
1570*4882a593Smuzhiyun goto error;
1571*4882a593Smuzhiyun }
1572*4882a593Smuzhiyun #endif /* BCMALFRAGPOOL */
1573*4882a593Smuzhiyun
1574*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1575*4882a593Smuzhiyun pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
1576*4882a593Smuzhiyun if (pktpool_shared_rxlfrag == NULL) {
1577*4882a593Smuzhiyun ASSERT(0);
1578*4882a593Smuzhiyun err = BCME_NOMEM;
1579*4882a593Smuzhiyun goto error;
1580*4882a593Smuzhiyun }
1581*4882a593Smuzhiyun #endif
1582*4882a593Smuzhiyun
1583*4882a593Smuzhiyun #if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
1584*4882a593Smuzhiyun pktpool_shared_rxdata = MALLOCZ(osh, sizeof(pktpool_t));
1585*4882a593Smuzhiyun if (pktpool_shared_rxdata == NULL) {
1586*4882a593Smuzhiyun ASSERT(0);
1587*4882a593Smuzhiyun err = BCME_NOMEM;
1588*4882a593Smuzhiyun goto error;
1589*4882a593Smuzhiyun }
1590*4882a593Smuzhiyun #endif
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun /*
1593*4882a593Smuzhiyun * At this early stage, there's not enough memory to allocate all
1594*4882a593Smuzhiyun * requested pkts in the shared pool. Need to add to the pool
1595*4882a593Smuzhiyun * after reclaim
1596*4882a593Smuzhiyun *
1597*4882a593Smuzhiyun * n = NRXBUFPOST + SDPCMD_RXBUFS;
1598*4882a593Smuzhiyun *
1599*4882a593Smuzhiyun * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
1600*4882a593Smuzhiyun * registry is not initialized or the registry is depleted.
1601*4882a593Smuzhiyun *
1602*4882a593Smuzhiyun * A BCME_NOMEM error only indicates that the requested number of packets
1603*4882a593Smuzhiyun * were not filled into the pool.
1604*4882a593Smuzhiyun */
1605*4882a593Smuzhiyun n = 1;
1606*4882a593Smuzhiyun MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
1607*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_shared,
1608*4882a593Smuzhiyun &n, PKTBUFSZ, FALSE, lbuf_basic, FALSE, 0, 0)) != BCME_OK) {
1609*4882a593Smuzhiyun ASSERT(0);
1610*4882a593Smuzhiyun goto error;
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1615*4882a593Smuzhiyun n = 1;
1616*4882a593Smuzhiyun #if (((defined(EVENTLOG_D3_PRESERVE) && !defined(EVENTLOG_D3_PRESERVE_DISABLED)) || \
1617*4882a593Smuzhiyun defined(BCMPOOLRECLAIM)))
1618*4882a593Smuzhiyun is_heap_pool = TRUE;
1619*4882a593Smuzhiyun #else
1620*4882a593Smuzhiyun is_heap_pool = FALSE;
1621*4882a593Smuzhiyun #endif /* (( EVENTLOG_D3_PRESERVE && !EVENTLOG_D3_PRESERVE_DISABLED) || BCMPOOLRECLAIM) */
1622*4882a593Smuzhiyun
1623*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_shared_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag,
1624*4882a593Smuzhiyun is_heap_pool, POOL_HEAP_FLAG_D3, SHARED_FRAG_POOL_LEN >> 3)) !=
1625*4882a593Smuzhiyun BCME_OK) {
1626*4882a593Smuzhiyun ASSERT(0);
1627*4882a593Smuzhiyun goto error;
1628*4882a593Smuzhiyun }
1629*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1632*4882a593Smuzhiyun n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
1633*4882a593Smuzhiyun #ifdef RESV_POOL_HEAP
1634*4882a593Smuzhiyun is_heap_pool = TRUE;
1635*4882a593Smuzhiyun #else
1636*4882a593Smuzhiyun is_heap_pool = FALSE;
1637*4882a593Smuzhiyun #endif /* RESV_POOL_HEAP */
1638*4882a593Smuzhiyun
1639*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_resv_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag,
1640*4882a593Smuzhiyun is_heap_pool, POOL_HEAP_FLAG_RSRVPOOL, 0)) != BCME_OK) {
1641*4882a593Smuzhiyun ASSERT(0);
1642*4882a593Smuzhiyun goto error;
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
1645*4882a593Smuzhiyun #endif /* RESVFRAGPOOL */
1646*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun #if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
1649*4882a593Smuzhiyun n = 1;
1650*4882a593Smuzhiyun is_heap_pool = FALSE;
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_shared_alfrag, &n, PKTFRAGSZ, TRUE, lbuf_alfrag,
1653*4882a593Smuzhiyun is_heap_pool, 0, SHARED_ALFRAG_POOL_LEN >> 3)) != BCME_OK) {
1654*4882a593Smuzhiyun ASSERT(0);
1655*4882a593Smuzhiyun goto error;
1656*4882a593Smuzhiyun }
1657*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_shared_alfrag, SHARED_ALFRAG_POOL_LEN);
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun n = 0;
1660*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_shared_alfrag_data, &n, TXPKTALFRAG_DATA_BUFSZ, TRUE,
1661*4882a593Smuzhiyun lbuf_alfrag_data, FALSE, 0, SHARED_ALFRAG_DATA_POOL_LEN >> 3)) != BCME_OK) {
1662*4882a593Smuzhiyun ASSERT(0);
1663*4882a593Smuzhiyun goto error;
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_shared_alfrag_data, SHARED_ALFRAG_DATA_POOL_LEN);
1666*4882a593Smuzhiyun
1667*4882a593Smuzhiyun #endif /* BCMALFRAGPOOL */
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1670*4882a593Smuzhiyun #if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
1671*4882a593Smuzhiyun n = 1;
1672*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_shared_rxdata, &n, RXPKTFRAGDATASZ, TRUE, lbuf_rxdata,
1673*4882a593Smuzhiyun FALSE, 0, 0)) != BCME_OK) {
1674*4882a593Smuzhiyun ASSERT(0);
1675*4882a593Smuzhiyun goto error;
1676*4882a593Smuzhiyun }
1677*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_shared_rxdata, SHARED_RXDATA_POOL_LEN);
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun pktsz = 0;
1680*4882a593Smuzhiyun #else
1681*4882a593Smuzhiyun pktsz = RXPKTFRAGDATASZ;
1682*4882a593Smuzhiyun #endif /* defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED) */
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun #ifdef RESV_POOL_HEAP
1685*4882a593Smuzhiyun is_heap_pool = BCMPOOLRECLAIM_ENAB() ? TRUE : FALSE;
1686*4882a593Smuzhiyun #else
1687*4882a593Smuzhiyun is_heap_pool = FALSE;
1688*4882a593Smuzhiyun #endif /* RESV_POOL_HEAP */
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun n = 1;
1691*4882a593Smuzhiyun if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, &n, pktsz, TRUE, lbuf_rxfrag,
1692*4882a593Smuzhiyun is_heap_pool, POOL_HEAP_FLAG_D3, 0)) != BCME_OK) {
1693*4882a593Smuzhiyun ASSERT(0);
1694*4882a593Smuzhiyun goto error;
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun
1697*4882a593Smuzhiyun pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
1698*4882a593Smuzhiyun #endif /* defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) */
1699*4882a593Smuzhiyun
1700*4882a593Smuzhiyun #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1701*4882a593Smuzhiyun /* Attach poolreorg module */
1702*4882a593Smuzhiyun if ((frwd_poolreorg_info = poolreorg_attach(osh,
1703*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1704*4882a593Smuzhiyun pktpool_shared_lfrag,
1705*4882a593Smuzhiyun #else
1706*4882a593Smuzhiyun NULL,
1707*4882a593Smuzhiyun #endif /* defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) */
1708*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1709*4882a593Smuzhiyun pktpool_shared_rxlfrag,
1710*4882a593Smuzhiyun #else
1711*4882a593Smuzhiyun NULL,
1712*4882a593Smuzhiyun #endif /* BCMRXFRAGPOOL */
1713*4882a593Smuzhiyun pktpool_shared)) == NULL) {
1714*4882a593Smuzhiyun ASSERT(0);
1715*4882a593Smuzhiyun err = BCME_NOMEM;
1716*4882a593Smuzhiyun goto error;
1717*4882a593Smuzhiyun }
1718*4882a593Smuzhiyun #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1719*4882a593Smuzhiyun
1720*4882a593Smuzhiyun pktpool_osh = osh;
1721*4882a593Smuzhiyun MALLOC_CLEAR_NOPERSIST(osh);
1722*4882a593Smuzhiyun
1723*4882a593Smuzhiyun #ifdef POOL_HEAP_RECONFIG
1724*4882a593Smuzhiyun lbuf_free_cb_set(hnd_pktpool_lbuf_free_cb);
1725*4882a593Smuzhiyun #endif
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun return BCME_OK;
1728*4882a593Smuzhiyun
1729*4882a593Smuzhiyun error:
1730*4882a593Smuzhiyun hnd_pktpool_deinit(osh);
1731*4882a593Smuzhiyun
1732*4882a593Smuzhiyun return err;
1733*4882a593Smuzhiyun } /* hnd_pktpool_init */
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun void
1736*4882a593Smuzhiyun BCMATTACHFN(hnd_pktpool_deinit)(osl_t *osh)
1737*4882a593Smuzhiyun {
1738*4882a593Smuzhiyun #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1739*4882a593Smuzhiyun if (frwd_poolreorg_info != NULL) {
1740*4882a593Smuzhiyun poolreorg_detach(frwd_poolreorg_info);
1741*4882a593Smuzhiyun }
1742*4882a593Smuzhiyun #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1743*4882a593Smuzhiyun
1744*4882a593Smuzhiyun #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1745*4882a593Smuzhiyun if (pktpool_shared_rxlfrag != NULL) {
1746*4882a593Smuzhiyun if (pktpool_shared_rxlfrag->inited) {
1747*4882a593Smuzhiyun pktpool_deinit(osh, pktpool_shared_rxlfrag);
1748*4882a593Smuzhiyun }
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun hnd_free(pktpool_shared_rxlfrag);
1751*4882a593Smuzhiyun pktpool_shared_rxlfrag = (pktpool_t *)NULL;
1752*4882a593Smuzhiyun }
1753*4882a593Smuzhiyun #endif
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun #if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
1756*4882a593Smuzhiyun if (pktpool_shared_rxdata != NULL) {
1757*4882a593Smuzhiyun if (pktpool_shared_rxdata->inited) {
1758*4882a593Smuzhiyun pktpool_deinit(osh, pktpool_shared_rxdata);
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun hnd_free(pktpool_shared_rxdata);
1762*4882a593Smuzhiyun pktpool_shared_rxdata = (pktpool_t *)NULL;
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun #endif
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1767*4882a593Smuzhiyun if (pktpool_shared_lfrag != NULL) {
1768*4882a593Smuzhiyun if (pktpool_shared_lfrag->inited) {
1769*4882a593Smuzhiyun pktpool_deinit(osh, pktpool_shared_lfrag);
1770*4882a593Smuzhiyun }
1771*4882a593Smuzhiyun hnd_free(pktpool_shared_lfrag);
1772*4882a593Smuzhiyun pktpool_shared_lfrag = (pktpool_t *)NULL;
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1775*4882a593Smuzhiyun
1776*4882a593Smuzhiyun #if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
1777*4882a593Smuzhiyun if (pktpool_shared_alfrag != NULL) {
1778*4882a593Smuzhiyun if (pktpool_shared_alfrag->inited) {
1779*4882a593Smuzhiyun pktpool_deinit(osh, pktpool_shared_alfrag);
1780*4882a593Smuzhiyun }
1781*4882a593Smuzhiyun hnd_free(pktpool_shared_alfrag);
1782*4882a593Smuzhiyun pktpool_shared_alfrag = (pktpool_t *)NULL;
1783*4882a593Smuzhiyun }
1784*4882a593Smuzhiyun
1785*4882a593Smuzhiyun if (pktpool_shared_alfrag_data != NULL) {
1786*4882a593Smuzhiyun if (pktpool_shared_alfrag_data->inited) {
1787*4882a593Smuzhiyun pktpool_deinit(osh, pktpool_shared_alfrag_data);
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun
1790*4882a593Smuzhiyun hnd_free(pktpool_shared_alfrag_data);
1791*4882a593Smuzhiyun pktpool_shared_alfrag_data = (pktpool_t *)NULL;
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun #endif /* BCMALFRAGPOOL */
1794*4882a593Smuzhiyun
1795*4882a593Smuzhiyun #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1796*4882a593Smuzhiyun if (resv_pool_info != NULL) {
1797*4882a593Smuzhiyun if (pktpool_resv_lfrag != NULL) {
1798*4882a593Smuzhiyun pktpool_resv_lfrag = NULL;
1799*4882a593Smuzhiyun }
1800*4882a593Smuzhiyun hnd_free(resv_pool_info);
1801*4882a593Smuzhiyun }
1802*4882a593Smuzhiyun #endif /* RESVFRAGPOOL */
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun if (pktpool_shared != NULL) {
1805*4882a593Smuzhiyun if (pktpool_shared->inited) {
1806*4882a593Smuzhiyun pktpool_deinit(osh, pktpool_shared);
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun hnd_free(pktpool_shared);
1810*4882a593Smuzhiyun pktpool_shared = (pktpool_t *)NULL;
1811*4882a593Smuzhiyun }
1812*4882a593Smuzhiyun
1813*4882a593Smuzhiyun pktpool_dettach(osh);
1814*4882a593Smuzhiyun
1815*4882a593Smuzhiyun MALLOC_CLEAR_NOPERSIST(osh);
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun
1818*4882a593Smuzhiyun /** is called at each 'wl up' */
1819*4882a593Smuzhiyun int
1820*4882a593Smuzhiyun hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
1821*4882a593Smuzhiyun {
1822*4882a593Smuzhiyun return (pktpool_fill(pktpool_osh, pktpool, minimal));
1823*4882a593Smuzhiyun }
1824*4882a593Smuzhiyun
1825*4882a593Smuzhiyun /** refills pktpools after reclaim, is called once */
1826*4882a593Smuzhiyun void
1827*4882a593Smuzhiyun hnd_pktpool_refill(bool minimal)
1828*4882a593Smuzhiyun {
1829*4882a593Smuzhiyun if (POOL_ENAB(pktpool_shared)) {
1830*4882a593Smuzhiyun #if defined(SRMEM)
1831*4882a593Smuzhiyun if (SRMEM_ENAB()) {
1832*4882a593Smuzhiyun int maxlen = pktpool_max_pkts(pktpool_shared);
1833*4882a593Smuzhiyun int n_pkts = pktpool_tot_pkts(pktpool_shared);
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun for (; n_pkts < maxlen; n_pkts++) {
1836*4882a593Smuzhiyun void *p;
1837*4882a593Smuzhiyun if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
1838*4882a593Smuzhiyun break;
1839*4882a593Smuzhiyun pktpool_add(pktpool_shared, p);
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun #endif /* SRMEM */
1843*4882a593Smuzhiyun pktpool_fill(pktpool_osh, pktpool_shared, minimal);
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun /* fragpool reclaim */
1846*4882a593Smuzhiyun #ifdef BCMFRAGPOOL
1847*4882a593Smuzhiyun if (POOL_ENAB(pktpool_shared_lfrag)) {
1848*4882a593Smuzhiyun pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun #endif /* BCMFRAGPOOL */
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun /* alfragpool reclaim */
1853*4882a593Smuzhiyun #ifdef BCMALFRAGPOOL
1854*4882a593Smuzhiyun if (POOL_ENAB(pktpool_shared_alfrag)) {
1855*4882a593Smuzhiyun pktpool_fill(pktpool_osh, pktpool_shared_alfrag, minimal);
1856*4882a593Smuzhiyun }
1857*4882a593Smuzhiyun
1858*4882a593Smuzhiyun if (POOL_ENAB(pktpool_shared_alfrag_data)) {
1859*4882a593Smuzhiyun pktpool_fill(pktpool_osh, pktpool_shared_alfrag_data, minimal);
1860*4882a593Smuzhiyun }
1861*4882a593Smuzhiyun #endif /* BCMALFRAGPOOL */
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun /* rx fragpool reclaim */
1864*4882a593Smuzhiyun #ifdef BCMRXFRAGPOOL
1865*4882a593Smuzhiyun if (POOL_ENAB(pktpool_shared_rxlfrag)) {
1866*4882a593Smuzhiyun pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
1867*4882a593Smuzhiyun }
1868*4882a593Smuzhiyun #endif
1869*4882a593Smuzhiyun
1870*4882a593Smuzhiyun #ifdef BCMRXDATAPOOL
1871*4882a593Smuzhiyun if (POOL_ENAB(pktpool_shared_rxdata)) {
1872*4882a593Smuzhiyun pktpool_fill(pktpool_osh, pktpool_shared_rxdata, minimal);
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun #endif
1875*4882a593Smuzhiyun
1876*4882a593Smuzhiyun #if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
1877*4882a593Smuzhiyun if (POOL_ENAB(pktpool_resv_lfrag)) {
1878*4882a593Smuzhiyun int resv_size = (pktpool_resv_lfrag->max_pkt_bytes + LBUFFRAGSZ) *
1879*4882a593Smuzhiyun pktpool_resv_lfrag->maxlen;
1880*4882a593Smuzhiyun hnd_resv_pool_init(resv_pool_info, resv_size);
1881*4882a593Smuzhiyun hnd_resv_pool_enable(resv_pool_info);
1882*4882a593Smuzhiyun }
1883*4882a593Smuzhiyun #endif /* BCMRESVFRAGPOOL */
1884*4882a593Smuzhiyun }
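
/*
 * Illustrative sketch (not compiled): the bring-up ordering suggested by the
 * comments above — pools are created at attach time, topped up once after
 * reclaim, and filled again at each 'wl up'. The wrapper name and the choice
 * of filling only the shared pool here are assumptions for illustration.
 */
#if 0
static int
example_pktpool_bringup(osl_t *osh)
{
	int err = hnd_pktpool_init(osh);	/* attach: registry + pools */

	if (err != BCME_OK)
		return err;

	hnd_pktpool_refill(FALSE);		/* once, after reclaim */

	return hnd_pktpool_fill(pktpool_shared, FALSE);	/* per 'wl up' */
}
#endif /* illustrative example */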
1885*4882a593Smuzhiyun
1886*4882a593Smuzhiyun #ifdef POOL_HEAP_RECONFIG
1887*4882a593Smuzhiyun #define hnd_pktpool_release_active_set(pktp) (pktpool_heap_rel_active |= (1 << pktp->id))
1888*4882a593Smuzhiyun #define hnd_pktpool_release_active_reset(pktp) (pktpool_heap_rel_active &= ~(1 << pktp->id))
1889*4882a593Smuzhiyun /* Function to enable/disable heap pool usage */
1890*4882a593Smuzhiyun
1891*4882a593Smuzhiyun void
1892*4882a593Smuzhiyun hnd_pktpool_heap_handle(osl_t *osh, uint32 flag, bool enable)
1893*4882a593Smuzhiyun {
1894*4882a593Smuzhiyun int i = 0;
1895*4882a593Smuzhiyun pktpool_t *pktp;
1896*4882a593Smuzhiyun /*
1897*4882a593Smuzhiyun * Loop through all the registered pktpools. For each heap pool whose
1898*4882a593Smuzhiyun * flag matches, either release surplus pkts to the heap (enable) or
1899*4882a593Smuzhiyun * retrieve them from the heap back into the pool (disable).
1900*4882a593Smuzhiyun */
1901*4882a593Smuzhiyun for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
1902*4882a593Smuzhiyun if ((pktp = get_pktpools_registry(i)) != NULL) {
1903*4882a593Smuzhiyun if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
1904*4882a593Smuzhiyun if (enable) {
1905*4882a593Smuzhiyun hnd_pktpool_heap_pkt_release(pktpool_osh, pktp, flag);
1906*4882a593Smuzhiyun } else {
1907*4882a593Smuzhiyun hnd_pktpool_heap_pkt_retrieve(pktp, flag);
1908*4882a593Smuzhiyun }
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun }
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun }
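
/*
 * Illustrative sketch (not compiled): toggling heap usage for the pools
 * tagged with the D3 flag. TRUE releases surplus packets from the matching
 * pools into the free heap, FALSE retrieves them back. POOL_HEAP_FLAG_D3 is
 * the flag used for the lfrag and rxlfrag pools in hnd_pktpool_init() above;
 * the helper name is an assumption.
 */
#if 0
static void
example_toggle_d3_heap(osl_t *osh, bool release_to_heap)
{
	hnd_pktpool_heap_handle(osh, POOL_HEAP_FLAG_D3, release_to_heap);
}
#endif /* illustrative example */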
1913*4882a593Smuzhiyun
1914*4882a593Smuzhiyun /* Do memory allocation from pool heap memory */
1915*4882a593Smuzhiyun void *
1916*4882a593Smuzhiyun hnd_pktpool_freelist_alloc(uint size, uint alignbits, uint32 flag)
1917*4882a593Smuzhiyun {
1918*4882a593Smuzhiyun int i = 0;
1919*4882a593Smuzhiyun pktpool_t *pktp;
1920*4882a593Smuzhiyun void *p = NULL;
1921*4882a593Smuzhiyun for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
1922*4882a593Smuzhiyun if ((pktp = get_pktpools_registry(i)) != NULL) {
1923*4882a593Smuzhiyun if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
1924*4882a593Smuzhiyun p = rte_freelist_mgr_alloc(size, alignbits, pktp->mem_handle);
1925*4882a593Smuzhiyun if (p)
1926*4882a593Smuzhiyun break;
1927*4882a593Smuzhiyun }
1928*4882a593Smuzhiyun }
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun return p;
1931*4882a593Smuzhiyun }
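
/*
 * Illustrative sketch (not compiled): borrowing memory that pools have
 * released to the heap. The 2 KB size is arbitrary, and treating alignbits
 * as a log2 alignment (2^5 = 32 bytes) is an assumption based on the
 * parameter name; the helper itself is not part of the driver.
 */
#if 0
static void *
example_heap_borrow(void)
{
	return hnd_pktpool_freelist_alloc(2048, 5, POOL_HEAP_FLAG_D3);
}
#endif /* illustrative example */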
1932*4882a593Smuzhiyun
1933*4882a593Smuzhiyun /* Release pkts from pool to free heap */
1934*4882a593Smuzhiyun static void
1935*4882a593Smuzhiyun hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag)
1936*4882a593Smuzhiyun {
1937*4882a593Smuzhiyun pktpool_cb_extn_t cb = NULL;
1938*4882a593Smuzhiyun void *arg = NULL;
1939*4882a593Smuzhiyun int i = 0;
1940*4882a593Smuzhiyun pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
1941*4882a593Smuzhiyun
1942*4882a593Smuzhiyun pktp->release_active = FALSE;
1943*4882a593Smuzhiyun hnd_pktpool_release_active_reset(pktp);
1944*4882a593Smuzhiyun
1945*4882a593Smuzhiyun if (pktp->n_pkts <= pktp->min_backup_buf)
1946*4882a593Smuzhiyun return;
1947*4882a593Smuzhiyun /* call module specific callbacks */
1948*4882a593Smuzhiyun if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
1949*4882a593Smuzhiyun /* If pool is shared rx frag pool, use call back fn to reclaim host address
1950*4882a593Smuzhiyun * and Rx cpl ID associated with the pkt.
1951*4882a593Smuzhiyun */
1952*4882a593Smuzhiyun ASSERT(pktp->cbext.cb != NULL);
1953*4882a593Smuzhiyun cb = pktp->cbext.cb;
1954*4882a593Smuzhiyun arg = pktp->cbext.arg;
1955*4882a593Smuzhiyun } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
1956*4882a593Smuzhiyun /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
1957*4882a593Smuzhiyun * associated with the pkt.
1958*4882a593Smuzhiyun */
1959*4882a593Smuzhiyun cb = pktp->rxcplidfn.cb;
1960*4882a593Smuzhiyun arg = pktp->rxcplidfn.arg;
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun while (pktp->avail > pktp->min_backup_buf) {
1964*4882a593Smuzhiyun void * p = pktp->freelist;
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
1967*4882a593Smuzhiyun PKTSETFREELIST(p, NULL);
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun if (cb != NULL) {
1970*4882a593Smuzhiyun if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) {
1971*4882a593Smuzhiyun PKTSETFREELIST(p, pktp->freelist);
1972*4882a593Smuzhiyun pktp->freelist = p;
1973*4882a593Smuzhiyun break;
1974*4882a593Smuzhiyun }
1975*4882a593Smuzhiyun }
1976*4882a593Smuzhiyun
1977*4882a593Smuzhiyun PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
1978*4882a593Smuzhiyun
1979*4882a593Smuzhiyun lb_set_nofree(p);
1980*4882a593Smuzhiyun total_pool_pktid_count++;
1981*4882a593Smuzhiyun PKTFREE(osh, p, pktp->istx); /* free the packet */
1982*4882a593Smuzhiyun
1983*4882a593Smuzhiyun rte_freelist_mgr_add(p, pktp->mem_handle);
1984*4882a593Smuzhiyun pktp->avail--;
1985*4882a593Smuzhiyun pktp->n_pkts--;
1986*4882a593Smuzhiyun pktp->poolheap_count++;
1987*4882a593Smuzhiyun }
1988*4882a593Smuzhiyun
1989*4882a593Smuzhiyun /* Execute call back for upper layer which used pkt from heap */
1990*4882a593Smuzhiyun for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
1991*4882a593Smuzhiyun if ((pktp_heap_cb[i].fn != NULL) &&
1992*4882a593Smuzhiyun (flag == pktp_heap_cb[i].flag))
1993*4882a593Smuzhiyun (pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, TRUE);
1994*4882a593Smuzhiyun }
1995*4882a593Smuzhiyun
1996*4882a593Smuzhiyun }
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun static pktpool_heap_cb_reg_t *
1999*4882a593Smuzhiyun BCMRAMFN(hnd_pool_get_cb_registry)(void)
2000*4882a593Smuzhiyun {
2001*4882a593Smuzhiyun return pktpool_heap_cb_reg;
2002*4882a593Smuzhiyun }
2003*4882a593Smuzhiyun
2004*4882a593Smuzhiyun static void
2005*4882a593Smuzhiyun BCMFASTPATH(hnd_pktpool_lbuf_free_cb)(uint8 poolid)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun int i = 0;
2008*4882a593Smuzhiyun pktpool_t *pktp;
2009*4882a593Smuzhiyun
2010*4882a593Smuzhiyun if (poolid == PKTPOOL_INVALID_ID && pktpool_heap_rel_active) {
2011*4882a593Smuzhiyun for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
2012*4882a593Smuzhiyun if ((pktp = get_pktpools_registry(i)) != NULL) {
2013*4882a593Smuzhiyun if (pktp->is_heap_pool && (pktp->release_active)) {
2014*4882a593Smuzhiyun rte_freelist_mgr_release(pktp->mem_handle);
2015*4882a593Smuzhiyun }
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun }
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun
2021*4882a593Smuzhiyun /* Take back pkts from free mem and refill pool */
2022*4882a593Smuzhiyun static void
2023*4882a593Smuzhiyun hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag)
2024*4882a593Smuzhiyun {
2025*4882a593Smuzhiyun int i = 0;
2026*4882a593Smuzhiyun pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
2027*4882a593Smuzhiyun pktp->release_active = TRUE;
2028*4882a593Smuzhiyun hnd_pktpool_release_active_set(pktp);
2029*4882a593Smuzhiyun
2030*4882a593Smuzhiyun /* Execute call back for upper layer which used pkt from heap */
2031*4882a593Smuzhiyun for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
2032*4882a593Smuzhiyun if ((pktp_heap_cb[i].fn != NULL) &&
2033*4882a593Smuzhiyun (flag == pktp_heap_cb[i].flag))
2034*4882a593Smuzhiyun (pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, FALSE);
2035*4882a593Smuzhiyun }
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun rte_freelist_mgr_release(pktp->mem_handle);
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun
2040*4882a593Smuzhiyun /* Function to add back the pkt to pktpool */
2041*4882a593Smuzhiyun static int
2042*4882a593Smuzhiyun hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize)
2043*4882a593Smuzhiyun {
2044*4882a593Smuzhiyun pktpool_t *pktp = (pktpool_t *)ctxt;
2045*4882a593Smuzhiyun struct lbuf *lb;
2046*4882a593Smuzhiyun int ret = BCME_ERROR;
2047*4882a593Smuzhiyun if (pktp != NULL) {
2048*4882a593Smuzhiyun if ((lb = PKTALLOC_ON_LOC(pktpool_osh, pktp->max_pkt_bytes,
2049*4882a593Smuzhiyun pktp->type, pkt, pktsize)) != NULL) {
2050*4882a593Smuzhiyun if ((ret = pktpool_add(pktp, lb)) == BCME_OK) {
2051*4882a593Smuzhiyun pktp->poolheap_count--;
2052*4882a593Smuzhiyun ASSERT(total_pool_pktid_count > 0);
2053*4882a593Smuzhiyun total_pool_pktid_count--;
2054*4882a593Smuzhiyun if (pktp->poolheap_count == 0) {
2055*4882a593Smuzhiyun pktp->release_active = FALSE;
2056*4882a593Smuzhiyun hnd_pktpool_release_active_reset(pktp);
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun if (pktp->cbcnt) {
2059*4882a593Smuzhiyun if (pktp->empty == FALSE)
2060*4882a593Smuzhiyun pktpool_avail_notify(pktp);
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun } else {
2063*4882a593Smuzhiyun /*
2064*4882a593Smuzhiyun * pktpool_add() failing indicates the pool already
2065*4882a593Smuzhiyun * holds its maximum number of pkts, so free this
2066*4882a593Smuzhiyun * buffer back to the heap.
2067*4882a593Smuzhiyun */
2068*4882a593Smuzhiyun PKTFREE(pktpool_osh, lb, pktsize);
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun ret = BCME_OK;
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun }
2073*4882a593Smuzhiyun return ret;
2074*4882a593Smuzhiyun }
2075*4882a593Smuzhiyun
2076*4882a593Smuzhiyun int
2077*4882a593Smuzhiyun hnd_pktpool_heap_register_cb(pktpool_heap_cb_t fn, void *ctxt, uint32 flag)
2078*4882a593Smuzhiyun {
2079*4882a593Smuzhiyun int i = 0;
2080*4882a593Smuzhiyun int err = BCME_ERROR;
2081*4882a593Smuzhiyun pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun /* Search for free entry */
2084*4882a593Smuzhiyun for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
2085*4882a593Smuzhiyun if (pktp_heap_cb[i].fn == NULL)
2086*4882a593Smuzhiyun break;
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun
2089*4882a593Smuzhiyun if (i < PKTPOOL_MAX_HEAP_CB) {
2090*4882a593Smuzhiyun pktp_heap_cb[i].fn = fn;
2091*4882a593Smuzhiyun pktp_heap_cb[i].ctxt = ctxt;
2092*4882a593Smuzhiyun pktp_heap_cb[i].flag = flag;
2093*4882a593Smuzhiyun err = BCME_OK;
2094*4882a593Smuzhiyun }
2095*4882a593Smuzhiyun return err;
2096*4882a593Smuzhiyun }
2097*4882a593Smuzhiyun
2098*4882a593Smuzhiyun int
2099*4882a593Smuzhiyun hnd_pktpool_heap_deregister_cb(pktpool_heap_cb_t fn)
2100*4882a593Smuzhiyun {
2101*4882a593Smuzhiyun int i = 0;
2102*4882a593Smuzhiyun int err = BCME_ERROR;
2103*4882a593Smuzhiyun pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun /* Search for matching entry */
2106*4882a593Smuzhiyun for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
2107*4882a593Smuzhiyun if (pktp_heap_cb[i].fn == fn)
2108*4882a593Smuzhiyun break;
2109*4882a593Smuzhiyun }
2110*4882a593Smuzhiyun
2111*4882a593Smuzhiyun if (i < PKTPOOL_MAX_HEAP_CB) {
2112*4882a593Smuzhiyun pktp_heap_cb[i].fn = NULL;
2113*4882a593Smuzhiyun err = BCME_OK;
2114*4882a593Smuzhiyun }
2115*4882a593Smuzhiyun return err;
2116*4882a593Smuzhiyun }
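
/*
 * Illustrative sketch (not compiled): a module hooking the heap
 * release/retrieve notifications. The callback shape (context pointer plus an
 * enabled flag) is inferred from how the registry entries are invoked in
 * hnd_pktpool_heap_pkt_release() and hnd_pktpool_heap_pkt_retrieve() above;
 * the callback body, helper names and the flag value are assumptions only.
 */
#if 0
static void
example_heap_state_cb(void *ctxt, bool heap_enabled)
{
	/* e.g. only call hnd_pktpool_freelist_alloc() while heap_enabled */
	BCM_REFERENCE(ctxt);
	BCM_REFERENCE(heap_enabled);
}

static int
example_register_heap_cb(void *module_ctxt)
{
	return hnd_pktpool_heap_register_cb(example_heap_state_cb, module_ctxt,
		POOL_HEAP_FLAG_D3);
}
#endif /* illustrative example */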
2117*4882a593Smuzhiyun
2118*4882a593Smuzhiyun uint16
2119*4882a593Smuzhiyun hnd_pktpool_get_min_bkup_buf(pktpool_t *pktp)
2120*4882a593Smuzhiyun {
2121*4882a593Smuzhiyun return pktp->min_backup_buf;
2122*4882a593Smuzhiyun }
2123*4882a593Smuzhiyun #endif /* POOL_HEAP_RECONFIG */
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun uint32
2126*4882a593Smuzhiyun hnd_pktpool_get_total_poolheap_count(void)
2127*4882a593Smuzhiyun {
2128*4882a593Smuzhiyun return total_pool_pktid_count;
2129*4882a593Smuzhiyun }
2130*4882a593Smuzhiyun #endif /* BCMPKTPOOL */