1 /*
2 * HND generic packet pool operation primitives
3 *
4 * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5 *
6 * Copyright (C) 1999-2017, Broadcom Corporation
7 *
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
13 *
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
21 *
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
25 *
26 *
27 * <<Broadcom-WL-IPTag/Open:>>
28 *
29 * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $
30 */
31
32 #include <typedefs.h>
33 #include <osl.h>
34 #include <osl_ext.h>
35 #include <bcmutils.h>
36 #include <hnd_pktpool.h>
37 #ifdef BCMRESVFRAGPOOL
38 #include <hnd_resvpool.h>
39 #endif /* BCMRESVFRAGPOOL */
40 #ifdef BCMFRWDPOOLREORG
41 #include <hnd_poolreorg.h>
42 #endif /* BCMFRWDPOOLREORG */
43
44 /* mutex macros for thread safe */
45 #ifdef HND_PKTPOOL_THREAD_SAFE
46 #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
47 #define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
48 #define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
49 #define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
50 #else
51 #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
52 #define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
53 #define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
54 #define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
55 #endif // endif
56
57 /* Registry size is one larger than max pools, as slot #0 is reserved */
58 #define PKTPOOLREG_RSVD_ID (0U)
59 #define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
60 #define PKTPOOLREG_FREE_PTR (POOLPTR(NULL))
61
62 #define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp)))
63 #define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp)))
64
65 /* Tag a registry entry as free for use */
66 #define PKTPOOL_REGISTRY_CLR(id) \
67 PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
68 #define PKTPOOL_REGISTRY_ISCLR(id) \
69 (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
70
71 /* Tag registry entry 0 as reserved */
72 #define PKTPOOL_REGISTRY_RSV() \
73 PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
74 #define PKTPOOL_REGISTRY_ISRSVD() \
75 (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
76
77 /* Walk all un-reserved entries in registry */
78 #define PKTPOOL_REGISTRY_FOREACH(id) \
79 for ((id) = 1U; (id) <= pktpools_max; (id)++)
80
81 enum pktpool_empty_cb_state {
82 EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */
83 EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */
84 EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */
85 };
86
87 uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
88 pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
89
90 /* Register/Deregister a pktpool with registry during pktpool_init/deinit */
91 static int pktpool_register(pktpool_t * poolptr);
92 static int pktpool_deregister(pktpool_t * poolptr);
93
94 /** add declaration */
95 static void pktpool_avail_notify(pktpool_t *pktp);
96
97 /** accessor functions required when ROMming this file, forced into RAM */
98
/** Returns the pool pointer stored at registry slot 'id'; forced into RAM for ROM builds. */
pktpool_t *
BCMRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}
104
/** Stores 'pp' (a pool pointer or the reserved/free tag value) into registry slot 'id'. */
static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}
110
/** Returns TRUE iff registry slot 'id' currently holds exactly 'pp'. */
static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}
116
117 /** Constructs a pool registry to serve a maximum of total_pools */
118 int
pktpool_attach(osl_t * osh,uint32 total_pools)119 pktpool_attach(osl_t *osh, uint32 total_pools)
120 {
121 uint32 poolid;
122 BCM_REFERENCE(osh);
123
124 if (pktpools_max != 0U) {
125 return BCME_ERROR;
126 }
127
128 ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
129
130 /* Initialize registry: reserve slot#0 and tag others as free */
131 PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */
132
133 PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
134 PKTPOOL_REGISTRY_CLR(poolid);
135 }
136
137 pktpools_max = total_pools;
138
139 return (int)pktpools_max;
140 }
141
142 /** Destructs the pool registry. Ascertain all pools were first de-inited */
143 int
pktpool_dettach(osl_t * osh)144 pktpool_dettach(osl_t *osh)
145 {
146 uint32 poolid;
147 BCM_REFERENCE(osh);
148
149 if (pktpools_max == 0U) {
150 return BCME_OK;
151 }
152
153 /* Ascertain that no pools are still registered */
154 ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
155
156 PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
157 ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
158 }
159
160 pktpools_max = 0U; /* restore boot state */
161
162 return BCME_OK;
163 }
164
165 /** Registers a pool in a free slot; returns the registry slot index */
166 static int
pktpool_register(pktpool_t * poolptr)167 pktpool_register(pktpool_t * poolptr)
168 {
169 uint32 poolid;
170
171 if (pktpools_max == 0U) {
172 return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
173 }
174
175 ASSERT(pktpools_max != 0U);
176
177 /* find an empty slot in pktpools_registry */
178 PKTPOOL_REGISTRY_FOREACH(poolid) {
179 if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
180 PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
181 return (int)poolid; /* return pool ID */
182 }
183 } /* FOREACH */
184
185 return PKTPOOL_INVALID_ID; /* error: registry is full */
186 }
187
188 /** Deregisters a pktpool, given the pool pointer; tag slot as free */
189 static int
pktpool_deregister(pktpool_t * poolptr)190 pktpool_deregister(pktpool_t * poolptr)
191 {
192 uint32 poolid;
193
194 ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
195
196 poolid = POOLID(poolptr);
197 ASSERT(poolid <= pktpools_max);
198
199 /* Asertain that a previously registered poolptr is being de-registered */
200 if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
201 PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
202 } else {
203 ASSERT(0);
204 return BCME_ERROR; /* mismatch in registry */
205 }
206
207 return BCME_OK;
208 }
209
210 /**
211 * pktpool_init:
212 * User provides a pktpool_t structure and specifies the number of packets to
213 * be pre-filled into the pool (n_pkts).
214 * pktpool_init first attempts to register the pool and fetch a unique poolid.
215 * If registration fails, it is considered an BCME_ERR, caused by either the
216 * registry was not pre-created (pktpool_attach) or the registry is full.
217 * If registration succeeds, then the requested number of packets will be filled
218 * into the pool as part of initialization. In the event that there is no
219 * available memory to service the request, then BCME_NOMEM will be returned
220 * along with the count of how many packets were successfully allocated.
221 * In dongle builds, prior to memory reclaimation, one should limit the number
222 * of packets to be allocated during pktpool_init and fill the pool up after
223 * reclaim stage.
224 *
225 * @param n_pkts Number of packets to be pre-filled into the pool
226 * @param max_pkt_bytes The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
227 * @param type e.g. 'lbuf_frag'
228 */
229 int
pktpool_init(osl_t * osh,pktpool_t * pktp,int * n_pkts,int max_pkt_bytes,bool istx,uint8 type)230 pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx,
231 uint8 type)
232 {
233 int i, err = BCME_OK;
234 int pktplen;
235 uint8 pktp_id;
236
237 ASSERT(pktp != NULL);
238 ASSERT(osh != NULL);
239 ASSERT(n_pkts != NULL);
240
241 pktplen = *n_pkts;
242
243 bzero(pktp, sizeof(pktpool_t));
244
245 /* assign a unique pktpool id */
246 if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
247 return BCME_ERROR;
248 }
249 POOLSETID(pktp, pktp_id);
250
251 pktp->inited = TRUE;
252 pktp->istx = istx ? TRUE : FALSE;
253 pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
254 pktp->type = type;
255
256 if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
257 return BCME_ERROR;
258 }
259
260 pktp->maxlen = PKTPOOL_LEN_MAX;
261 pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
262
263 for (i = 0; i < pktplen; i++) {
264 void *p;
265 p = PKTGET(osh, max_pkt_bytes, TRUE);
266
267 if (p == NULL) {
268 /* Not able to allocate all requested pkts
269 * so just return what was actually allocated
270 * We can add to the pool later
271 */
272 if (pktp->freelist == NULL) /* pktpool free list is empty */
273 err = BCME_NOMEM;
274
275 goto exit;
276 }
277
278 PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
279
280 PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
281 pktp->freelist = p;
282
283 pktp->avail++;
284
285 #ifdef BCMDBG_POOL
286 pktp->dbg_q[pktp->dbg_qlen++].p = p;
287 #endif // endif
288 }
289
290 exit:
291 pktp->n_pkts = pktp->avail;
292
293 *n_pkts = pktp->n_pkts; /* number of packets managed by pool */
294 return err;
295 } /* pktpool_init */
296
297 /**
298 * pktpool_deinit:
299 * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
300 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
301 * An assert is in place to ensure that there are no packets still lingering
302 * around. Packets freed to a pool after the deinit will cause a memory
303 * corruption as the pktpool_t structure no longer exists.
304 */
int
pktpool_deinit(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		int i;
		/* NOTE(review): iterates 0..n_pkts INCLUSIVE, i.e. n_pkts+1 slots;
		 * confirm dbg_q is sized for this (looks like an off-by-one).
		 */
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif // endif

	/* Drain the free list: untag every packet from the pool and release
	 * it back to the heap. Packets still lent out are NOT reclaimed here;
	 * the asserts below catch that misuse.
	 */
	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->n_pkts == 0);

	return 0;
}
354
355 int
pktpool_fill(osl_t * osh,pktpool_t * pktp,bool minimal)356 pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
357 {
358 void *p;
359 int err = 0;
360 int n_pkts, psize, maxlen;
361
362 /* protect shared resource */
363 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
364 return BCME_ERROR;
365
366 ASSERT(pktp->max_pkt_bytes != 0);
367
368 maxlen = pktp->maxlen;
369 psize = minimal ? (maxlen >> 2) : maxlen;
370 for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) {
371
372 p = PKTGET(osh, pktp->n_pkts, TRUE);
373
374 if (p == NULL) {
375 err = BCME_NOMEM;
376 break;
377 }
378
379 if (pktpool_add(pktp, p) != BCME_OK) {
380 PKTFREE(osh, p, FALSE);
381 err = BCME_ERROR;
382 break;
383 }
384 }
385
386 /* protect shared resource */
387 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
388 return BCME_ERROR;
389
390 if (pktp->cbcnt) {
391 if (pktp->empty == FALSE)
392 pktpool_avail_notify(pktp);
393 }
394
395 return err;
396 }
397
398 #ifdef BCMPOOLRECLAIM
399 /* New API to decrease the pkts from pool, but not deinit
400 */
/** Frees up to 'free_cnt' packets from the pool's free list back to the heap
 * (without deinitializing the pool). Returns the number actually freed.
 */
uint16
pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt)
{
	uint16 freed = 0;

	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return freed;
	}

	/* cannot free more packets than are sitting on the free list */
	if (pktp->avail < free_cnt) {
		free_cnt = pktp->avail;
	}

	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If pool is shared rx frag pool, use call back fn to reclaim host address
		 * and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);

		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;

	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
		 * associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while ((pktp->freelist != NULL) && (free_cnt)) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		if (cb != NULL) {
			/* non-zero return from the callback: put the packet back
			 * at the head of the free list and stop reclaiming
			 */
			if (cb(pktp, arg, p, REMOVE_RXCPLID)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		free_cnt--;
	}

	pktp->avail -= freed;

	pktp->n_pkts -= freed;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return freed;
	}

	return freed;
}
471 #endif /* #ifdef BCMPOOLRECLAIM */
472
473 /* New API to empty the pkts from pool, but not deinit
474 * NOTE: caller is responsible to ensure,
475 * all pkts are available in pool for free; else LEAK !
476 */
477 int
pktpool_empty(osl_t * osh,pktpool_t * pktp)478 pktpool_empty(osl_t *osh, pktpool_t *pktp)
479 {
480 uint16 freed = 0;
481
482 ASSERT(osh != NULL);
483 ASSERT(pktp != NULL);
484
485 /* protect shared resource */
486 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
487 return BCME_ERROR;
488
489 #ifdef BCMDBG_POOL
490 {
491 int i;
492 for (i = 0; i <= pktp->n_pkts; i++) {
493 pktp->dbg_q[i].p = NULL;
494 }
495 }
496 #endif // endif
497
498 while (pktp->freelist != NULL) {
499 void * p = pktp->freelist;
500
501 pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
502 PKTSETFREELIST(p, NULL);
503
504 PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
505
506 PKTFREE(osh, p, pktp->istx); /* free the packet */
507
508 freed++;
509 ASSERT(freed <= pktp->n_pkts);
510 }
511
512 pktp->avail -= freed;
513 ASSERT(pktp->avail == 0);
514
515 pktp->n_pkts -= freed;
516
517 ASSERT(pktp->n_pkts == 0);
518
519 /* protect shared resource */
520 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
521 return BCME_ERROR;
522
523 return 0;
524 }
525
526 static void *
pktpool_deq(pktpool_t * pktp)527 pktpool_deq(pktpool_t *pktp)
528 {
529 void *p = NULL;
530
531 if (pktp->avail == 0)
532 return NULL;
533
534 ASSERT(pktp->freelist != NULL);
535
536 p = pktp->freelist; /* dequeue packet from head of pktpool free list */
537 pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
538
539 PKTSETFREELIST(p, NULL);
540
541 pktp->avail--;
542
543 return p;
544 }
545
546 static void
pktpool_enq(pktpool_t * pktp,void * p)547 pktpool_enq(pktpool_t *pktp, void *p)
548 {
549 ASSERT(p != NULL);
550
551 PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
552 pktp->freelist = p; /* free list points to newly inserted packet */
553
554 pktp->avail++;
555 ASSERT(pktp->avail <= pktp->n_pkts);
556 }
557
558 /** utility for registering host addr fill function called from pciedev */
559 int
560 /* BCMATTACHFN */
561 (pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
562 {
563
564 ASSERT(cb != NULL);
565
566 ASSERT(pktp->cbext.cb == NULL);
567 pktp->cbext.cb = cb;
568 pktp->cbext.arg = arg;
569 return 0;
570 }
571
572 int
pktpool_rxcplid_fill_register(pktpool_t * pktp,pktpool_cb_extn_t cb,void * arg)573 pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
574 {
575
576 ASSERT(cb != NULL);
577
578 if (pktp == NULL)
579 return BCME_ERROR;
580 ASSERT(pktp->rxcplidfn.cb == NULL);
581 pktp->rxcplidfn.cb = cb;
582 pktp->rxcplidfn.arg = arg;
583 return 0;
584 }
585
586 /** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
587 void
pktpool_invoke_dmarxfill(pktpool_t * pktp)588 pktpool_invoke_dmarxfill(pktpool_t *pktp)
589 {
590 ASSERT(pktp->dmarxfill.cb);
591 ASSERT(pktp->dmarxfill.arg);
592
593 if (pktp->dmarxfill.cb)
594 pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
595 }
596
597 /** Registers callback functions for split rx mode */
598 int
pkpool_haddr_avail_register_cb(pktpool_t * pktp,pktpool_cb_t cb,void * arg)599 pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
600 {
601
602 ASSERT(cb != NULL);
603
604 pktp->dmarxfill.cb = cb;
605 pktp->dmarxfill.arg = arg;
606
607 return 0;
608 }
609
610 /**
611 * Registers callback functions.
612 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
613 */
614 int
pktpool_avail_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)615 pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
616 {
617 int err = 0;
618 int i;
619
620 /* protect shared resource */
621 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
622 return BCME_ERROR;
623
624 ASSERT(cb != NULL);
625
626 for (i = 0; i < pktp->cbcnt; i++) {
627 ASSERT(pktp->cbs[i].cb != NULL);
628 if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
629 pktp->cbs[i].refcnt++;
630 goto done;
631 }
632 }
633
634 i = pktp->cbcnt;
635 if (i == PKTPOOL_CB_MAX_AVL) {
636 err = BCME_ERROR;
637 goto done;
638 }
639
640 ASSERT(pktp->cbs[i].cb == NULL);
641 pktp->cbs[i].cb = cb;
642 pktp->cbs[i].arg = arg;
643 pktp->cbs[i].refcnt++;
644 pktp->cbcnt++;
645
646 done:
647 /* protect shared resource */
648 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
649 return BCME_ERROR;
650
651 return err;
652 }
653
654 /* No BCMATTACHFN as it is used in a non-attach function */
int
pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i, k;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	ASSERT(cb != NULL);

	/* locate the registration matching this cb/arg pair */
	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt--;
			if (pktp->cbs[i].refcnt) {
				/* Still there are references to this callback */
				goto done;
			}
			/* Moving any more callbacks to fill the hole */
			for (k = i+1; k < pktp->cbcnt; i++, k++) {
				pktp->cbs[i].cb = pktp->cbs[k].cb;
				pktp->cbs[i].arg = pktp->cbs[k].arg;
				pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
			}

			/* after compaction, 'i' indexes the now-stale last entry;
			 * reset the last callback
			 */
			pktp->cbs[i].cb = NULL;
			pktp->cbs[i].arg = NULL;
			pktp->cbs[i].refcnt = 0;

			pktp->cbcnt--;
			goto done;
		}
	}

	/* NOTE(review): returns 0 even when the cb/arg pair was not found —
	 * confirm callers do not rely on an error in that case.
	 */
done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	return err;
}
701
702 /** Registers callback functions */
703 int
pktpool_empty_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)704 pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
705 {
706 int err = 0;
707 int i;
708
709 /* protect shared resource */
710 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
711 return BCME_ERROR;
712
713 ASSERT(cb != NULL);
714
715 i = pktp->ecbcnt;
716 if (i == PKTPOOL_CB_MAX) {
717 err = BCME_ERROR;
718 goto done;
719 }
720
721 ASSERT(pktp->ecbs[i].cb == NULL);
722 pktp->ecbs[i].cb = cb;
723 pktp->ecbs[i].arg = arg;
724 pktp->ecbcnt++;
725
726 done:
727 /* protect shared resource */
728 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
729 return BCME_ERROR;
730
731 return err;
732 }
733
734 /** Calls registered callback functions */
735 static int
pktpool_empty_notify(pktpool_t * pktp)736 pktpool_empty_notify(pktpool_t *pktp)
737 {
738 int i;
739
740 pktp->empty = TRUE;
741 for (i = 0; i < pktp->ecbcnt; i++) {
742 ASSERT(pktp->ecbs[i].cb != NULL);
743 pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
744 }
745 pktp->empty = FALSE;
746
747 return 0;
748 }
749
750 #ifdef BCMDBG_POOL
751 int
pktpool_dbg_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)752 pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
753 {
754 int err = 0;
755 int i;
756
757 /* protect shared resource */
758 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
759 return BCME_ERROR;
760
761 ASSERT(cb);
762
763 i = pktp->dbg_cbcnt;
764 if (i == PKTPOOL_CB_MAX) {
765 err = BCME_ERROR;
766 goto done;
767 }
768
769 ASSERT(pktp->dbg_cbs[i].cb == NULL);
770 pktp->dbg_cbs[i].cb = cb;
771 pktp->dbg_cbs[i].arg = arg;
772 pktp->dbg_cbcnt++;
773
774 done:
775 /* protect shared resource */
776 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
777 return BCME_ERROR;
778
779 return err;
780 }
781
782 int pktpool_dbg_notify(pktpool_t *pktp);
783
784 int
pktpool_dbg_notify(pktpool_t * pktp)785 pktpool_dbg_notify(pktpool_t *pktp)
786 {
787 int i;
788
789 /* protect shared resource */
790 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
791 return BCME_ERROR;
792
793 for (i = 0; i < pktp->dbg_cbcnt; i++) {
794 ASSERT(pktp->dbg_cbs[i].cb);
795 pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
796 }
797
798 /* protect shared resource */
799 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
800 return BCME_ERROR;
801
802 return 0;
803 }
804
805 int
pktpool_dbg_dump(pktpool_t * pktp)806 pktpool_dbg_dump(pktpool_t *pktp)
807 {
808 int i;
809
810 /* protect shared resource */
811 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
812 return BCME_ERROR;
813
814 printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
815 for (i = 0; i < pktp->dbg_qlen; i++) {
816 ASSERT(pktp->dbg_q[i].p);
817 printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
818 pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
819 }
820
821 /* protect shared resource */
822 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
823 return BCME_ERROR;
824
825 return 0;
826 }
827
828 int
pktpool_stats_dump(pktpool_t * pktp,pktpool_stats_t * stats)829 pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
830 {
831 int i;
832 int state;
833
834 /* protect shared resource */
835 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
836 return BCME_ERROR;
837
838 bzero(stats, sizeof(pktpool_stats_t));
839 for (i = 0; i < pktp->dbg_qlen; i++) {
840 ASSERT(pktp->dbg_q[i].p != NULL);
841
842 state = PKTPOOLSTATE(pktp->dbg_q[i].p);
843 switch (state) {
844 case POOL_TXENQ:
845 stats->enq++; break;
846 case POOL_TXDH:
847 stats->txdh++; break;
848 case POOL_TXD11:
849 stats->txd11++; break;
850 case POOL_RXDH:
851 stats->rxdh++; break;
852 case POOL_RXD11:
853 stats->rxd11++; break;
854 case POOL_RXFILL:
855 stats->rxfill++; break;
856 case POOL_IDLE:
857 stats->idle++; break;
858 }
859 }
860
861 /* protect shared resource */
862 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
863 return BCME_ERROR;
864
865 return 0;
866 }
867
868 int
pktpool_start_trigger(pktpool_t * pktp,void * p)869 pktpool_start_trigger(pktpool_t *pktp, void *p)
870 {
871 uint32 cycles, i;
872
873 /* protect shared resource */
874 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
875 return BCME_ERROR;
876
877 if (!PKTPOOL(OSH_NULL, p))
878 goto done;
879
880 OSL_GETCYCLES(cycles);
881
882 for (i = 0; i < pktp->dbg_qlen; i++) {
883 ASSERT(pktp->dbg_q[i].p != NULL);
884
885 if (pktp->dbg_q[i].p == p) {
886 pktp->dbg_q[i].cycles = cycles;
887 break;
888 }
889 }
890
891 done:
892 /* protect shared resource */
893 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
894 return BCME_ERROR;
895
896 return 0;
897 }
898
899 int pktpool_stop_trigger(pktpool_t *pktp, void *p);
900
int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	/* only pool-tagged packets are tracked */
	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	/* find the packet's debug entry and compute the dwell time since
	 * pktpool_start_trigger() stamped it
	 */
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			/* no start stamp recorded: nothing to measure */
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				/* cycle counter wrapped around between the two
				 * samples; account for the wrap explicitly
				 */
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0; /* re-arm for the next trigger */
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
940 #endif /* BCMDBG_POOL */
941
942 int
pktpool_avail_notify_normal(osl_t * osh,pktpool_t * pktp)943 pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
944 {
945 BCM_REFERENCE(osh);
946 ASSERT(pktp);
947
948 /* protect shared resource */
949 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
950 return BCME_ERROR;
951
952 pktp->availcb_excl = NULL;
953
954 /* protect shared resource */
955 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
956 return BCME_ERROR;
957
958 return 0;
959 }
960
961 int
pktpool_avail_notify_exclusive(osl_t * osh,pktpool_t * pktp,pktpool_cb_t cb)962 pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
963 {
964 int i;
965 int err;
966 BCM_REFERENCE(osh);
967
968 ASSERT(pktp);
969
970 /* protect shared resource */
971 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
972 return BCME_ERROR;
973
974 ASSERT(pktp->availcb_excl == NULL);
975 for (i = 0; i < pktp->cbcnt; i++) {
976 if (cb == pktp->cbs[i].cb) {
977 pktp->availcb_excl = &pktp->cbs[i];
978 break;
979 }
980 }
981
982 if (pktp->availcb_excl == NULL)
983 err = BCME_ERROR;
984 else
985 err = 0;
986
987 /* protect shared resource */
988 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
989 return BCME_ERROR;
990
991 return err;
992 }
993
994 static void
pktpool_avail_notify(pktpool_t * pktp)995 pktpool_avail_notify(pktpool_t *pktp)
996 {
997 int i, k, idx;
998 int avail;
999
1000 ASSERT(pktp);
1001 if (pktp->availcb_excl != NULL) {
1002 pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
1003 return;
1004 }
1005
1006 k = pktp->cbcnt - 1;
1007 for (i = 0; i < pktp->cbcnt; i++) {
1008 avail = pktp->avail;
1009
1010 if (avail) {
1011 if (pktp->cbtoggle)
1012 idx = i;
1013 else
1014 idx = k--;
1015
1016 ASSERT(pktp->cbs[idx].cb != NULL);
1017 pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
1018 }
1019 }
1020
1021 /* Alternate between filling from head or tail
1022 */
1023 pktp->cbtoggle ^= 1;
1024
1025 return;
1026 }
1027
1028 /** Gets an empty packet from the caller provided pool */
1029 void *
pktpool_get(pktpool_t * pktp)1030 pktpool_get(pktpool_t *pktp)
1031 {
1032 void *p;
1033
1034 /* protect shared resource */
1035 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1036 return NULL;
1037
1038 p = pktpool_deq(pktp);
1039
1040 if (p == NULL) {
1041 /* Notify and try to reclaim tx pkts */
1042 if (pktp->ecbcnt)
1043 pktpool_empty_notify(pktp);
1044
1045 p = pktpool_deq(pktp);
1046 if (p == NULL)
1047 goto done;
1048 }
1049
1050 done:
1051 /* protect shared resource */
1052 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1053 return NULL;
1054
1055 return p;
1056 }
1057
/** Returns packet 'p' to the pool and, when appropriate, notifies avail callbacks. */
void
pktpool_free(pktpool_t *pktp, void *p)
{
	/* protect shared resource; NOTE(review): a mutex-acquire failure is
	 * silently dropped here (void return), so the packet would be lost —
	 * confirm this cannot occur in practice.
	 */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return;

	ASSERT(p != NULL);
#ifdef BCMDBG_POOL
	/* pktpool_stop_trigger(pktp, p); */
#endif // endif

	pktpool_enq(pktp, p);

	/**
	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
	 * If any avail callback functions are registered, send a notification
	 * that a new packet is available in the pool.
	 */
	if (pktp->cbcnt) {
		/* To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
		 * This allows to feed on burst basis as opposed to inefficient per-packet basis.
		 */
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/**
			 * If the call originated from pktpool_empty_notify, the just freed packet
			 * is needed in pktpool_get.
			 * Therefore don't call pktpool_avail_notify.
			 */
			if (pktp->empty == FALSE)
				pktpool_avail_notify(pktp);
		} else {
			/**
			 * The callback is temporarily disabled, log that a packet has been freed.
			 */
			pktp->emptycb_disable = EMPTYCB_SKIPPED;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return;
}
1101
1102 /** Adds a caller provided (empty) packet to the caller provided pool */
1103 int
pktpool_add(pktpool_t * pktp,void * p)1104 pktpool_add(pktpool_t *pktp, void *p)
1105 {
1106 int err = 0;
1107
1108 /* protect shared resource */
1109 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1110 return BCME_ERROR;
1111
1112 ASSERT(p != NULL);
1113
1114 if (pktp->n_pkts == pktp->maxlen) {
1115 err = BCME_RANGE;
1116 goto done;
1117 }
1118
1119 /* pkts in pool have same length */
1120 ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
1121 PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
1122
1123 pktp->n_pkts++;
1124 pktpool_enq(pktp, p);
1125
1126 #ifdef BCMDBG_POOL
1127 pktp->dbg_q[pktp->dbg_qlen++].p = p;
1128 #endif // endif
1129
1130 done:
1131 /* protect shared resource */
1132 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1133 return BCME_ERROR;
1134
1135 return err;
1136 }
1137
1138 /**
1139 * Force pktpool_setmaxlen () into RAM as it uses a constant
1140 * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
1141 */
1142 int
BCMRAMFN(pktpool_setmaxlen)1143 BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
1144 {
1145 /* protect shared resource */
1146 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1147 return BCME_ERROR;
1148
1149 if (maxlen > PKTPOOL_LEN_MAX)
1150 maxlen = PKTPOOL_LEN_MAX;
1151
1152 /* if pool is already beyond maxlen, then just cap it
1153 * since we currently do not reduce the pool len
1154 * already allocated
1155 */
1156 pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;
1157
1158 /* protect shared resource */
1159 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1160 return BCME_ERROR;
1161
1162 return pktp->maxlen;
1163 }
1164
1165 void
pktpool_emptycb_disable(pktpool_t * pktp,bool disable)1166 pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
1167 {
1168 ASSERT(pktp);
1169
1170 /**
1171 * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
1172 * If callback is going to be re-enabled, check if any packet got
1173 * freed and added back to the pool while callback was disabled.
1174 * When this is the case do the callback now, provided that callback functions
1175 * are registered and this call did not originate from pktpool_empty_notify.
1176 */
1177 if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
1178 (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
1179 pktpool_avail_notify(pktp);
1180 }
1181
1182 /* Enable or temporarily disable callback when packet becomes available. */
1183 pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
1184 }
1185
1186 bool
pktpool_emptycb_disabled(pktpool_t * pktp)1187 pktpool_emptycb_disabled(pktpool_t *pktp)
1188 {
1189 ASSERT(pktp);
1190 return pktp->emptycb_disable != EMPTYCB_ENABLED;
1191 }
1192
#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

/* Shared pool of basic (lbuf_basic) packets; created in hnd_pktpool_init */
pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
/* Shared pool of tx fragments (lbuf_frag) */
pktpool_t *pktpool_shared_lfrag = NULL;
#ifdef BCMRESVFRAGPOOL
/* Reserved fragment pool; obtained from resv_pool_info (hnd_resv_pool_alloc) */
pktpool_t *pktpool_resv_lfrag = NULL;
struct resv_info *resv_pool_info = NULL;
#endif /* BCMRESVFRAGPOOL */
#endif /* BCMFRAGPOOL */

/* Shared pool of rx fragments (lbuf_rxfrag) */
pktpool_t *pktpool_shared_rxlfrag = NULL;

/* osh the pools were created with; used by the fill/refill wrappers below */
static osl_t *pktpool_osh = NULL;
1209
1210 /**
1211 * Initializes several packet pools and allocates packets within those pools.
1212 */
1213 int
hnd_pktpool_init(osl_t * osh)1214 hnd_pktpool_init(osl_t *osh)
1215 {
1216 int err = BCME_OK;
1217 int n;
1218
1219 /* Construct a packet pool registry before initializing packet pools */
1220 n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
1221 if (n != PKTPOOL_MAXIMUM_ID) {
1222 ASSERT(0);
1223 err = BCME_ERROR;
1224 goto error0;
1225 }
1226
1227 pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
1228 if (pktpool_shared == NULL) {
1229 ASSERT(0);
1230 err = BCME_NOMEM;
1231 goto error1;
1232 }
1233
1234 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1235 pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
1236 if (pktpool_shared_lfrag == NULL) {
1237 ASSERT(0);
1238 err = BCME_NOMEM;
1239 goto error2;
1240 }
1241 #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1242 resv_pool_info = hnd_resv_pool_alloc(osh);
1243 if (resv_pool_info == NULL) {
1244 ASSERT(0);
1245 goto error2;
1246 }
1247 pktpool_resv_lfrag = resv_pool_info->pktp;
1248 if (pktpool_resv_lfrag == NULL) {
1249 ASSERT(0);
1250 goto error2;
1251 }
1252 #endif /* RESVFRAGPOOL */
1253 #endif /* FRAGPOOL */
1254
1255 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1256 pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
1257 if (pktpool_shared_rxlfrag == NULL) {
1258 ASSERT(0);
1259 err = BCME_NOMEM;
1260 goto error3;
1261 }
1262 #endif // endif
1263
1264 /*
1265 * At this early stage, there's not enough memory to allocate all
1266 * requested pkts in the shared pool. Need to add to the pool
1267 * after reclaim
1268 *
1269 * n = NRXBUFPOST + SDPCMD_RXBUFS;
1270 *
1271 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
1272 * registry is not initialized or the registry is depleted.
1273 *
1274 * A BCME_NOMEM error only indicates that the requested number of packets
1275 * were not filled into the pool.
1276 */
1277 n = 1;
1278 MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
1279 if ((err = pktpool_init(osh, pktpool_shared,
1280 &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
1281 ASSERT(0);
1282 goto error4;
1283 }
1284 pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
1285
1286 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1287 n = 1;
1288 if ((err = pktpool_init(osh, pktpool_shared_lfrag,
1289 &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) {
1290 ASSERT(0);
1291 goto error5;
1292 }
1293 pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
1294 #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1295 n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
1296 if (pktpool_init(osh, pktpool_resv_lfrag,
1297 &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) {
1298 ASSERT(0);
1299 goto error5;
1300 }
1301 pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
1302 #endif /* RESVFRAGPOOL */
1303 #endif /* BCMFRAGPOOL */
1304 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1305 n = 1;
1306 if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
1307 &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) {
1308 ASSERT(0);
1309 goto error6;
1310 }
1311 pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
1312 #endif // endif
1313
1314 #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1315 /* Attach poolreorg module */
1316 if ((frwd_poolreorg_info = poolreorg_attach(osh,
1317 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1318 pktpool_shared_lfrag,
1319 #else
1320 NULL,
1321 #endif // endif
1322 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1323 pktpool_shared_rxlfrag,
1324 #else
1325 NULL,
1326 #endif // endif
1327 pktpool_shared)) == NULL) {
1328 ASSERT(0);
1329 goto error7;
1330 }
1331 #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1332
1333 pktpool_osh = osh;
1334 MALLOC_CLEAR_NOPERSIST(osh);
1335
1336 return BCME_OK;
1337
1338 #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1339 /* detach poolreorg module */
1340 poolreorg_detach(frwd_poolreorg_info);
1341 error7:
1342 #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1343
1344 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1345 pktpool_deinit(osh, pktpool_shared_rxlfrag);
1346 error6:
1347 #endif // endif
1348
1349 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1350 pktpool_deinit(osh, pktpool_shared_lfrag);
1351 error5:
1352 #endif // endif
1353
1354 #if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
1355 (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
1356 pktpool_deinit(osh, pktpool_shared);
1357 #endif // endif
1358
1359 error4:
1360 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1361 hnd_free(pktpool_shared_rxlfrag);
1362 pktpool_shared_rxlfrag = (pktpool_t *)NULL;
1363 error3:
1364 #endif /* BCMRXFRAGPOOL */
1365
1366 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1367 hnd_free(pktpool_shared_lfrag);
1368 pktpool_shared_lfrag = (pktpool_t *)NULL;
1369 error2:
1370 #endif /* BCMFRAGPOOL */
1371
1372 hnd_free(pktpool_shared);
1373 pktpool_shared = (pktpool_t *)NULL;
1374
1375 error1:
1376 pktpool_dettach(osh);
1377 error0:
1378 MALLOC_CLEAR_NOPERSIST(osh);
1379 return err;
1380 } /* hnd_pktpool_init */
1381
1382 /** is called at each 'wl up' */
1383 int
hnd_pktpool_fill(pktpool_t * pktpool,bool minimal)1384 hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
1385 {
1386 return (pktpool_fill(pktpool_osh, pktpool, minimal));
1387 }
1388
/** refills pktpools after reclaim, is called once */
void
hnd_pktpool_refill(bool minimal)
{
	if (POOL_ENAB(pktpool_shared)) {
#if defined(SRMEM)
		if (SRMEM_ENAB()) {
			/* Top up the shared pool from SR memory first, one packet
			 * at a time, until the pool is full or SR memory runs out.
			 */
			int maxlen = pktpool_max_pkts(pktpool_shared);
			int n_pkts = pktpool_tot_pkts(pktpool_shared);

			for (; n_pkts < maxlen; n_pkts++) {
				void *p;
				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
					break;
				pktpool_add(pktpool_shared, p);
			}
		}
#endif /* SRMEM */
		/* Fill any remaining capacity from the heap */
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
	/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */
	/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif /* BCMRXFRAGPOOL */
#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
	if (POOL_ENAB(pktpool_resv_lfrag)) {
		/* Size and enable the reserved pool; no packets were allocated
		 * for it during hnd_pktpool_init (n was 0 there).
		 */
		int resv_size = (PKTFRAGSZ + LBUFFRAGSZ)*RESV_FRAG_POOL_LEN;
		hnd_resv_pool_init(resv_pool_info, resv_size);
		hnd_resv_pool_enable(resv_pool_info);
	}
#endif /* BCMRESVFRAGPOOL */
}
1429 #endif /* BCMPKTPOOL */
1430