/*
 * HND generic packet pool operation primitives
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Dual:>>
 */

#include <typedefs.h>
#include <osl.h>
#include <osl_ext.h>
#include <bcmutils.h>
#include <wlioctl.h>
#include <hnd_pktpool.h>
#ifdef BCMRESVFRAGPOOL
#include <hnd_resvpool.h>
#endif /* BCMRESVFRAGPOOL */
#ifdef BCMFRWDPOOLREORG
#include <hnd_poolreorg.h>
#endif /* BCMFRWDPOOLREORG */

#if defined(DONGLEBUILD) && defined(SRMEM)
#include <hndsrmem.h>
#endif /* DONGLEBUILD && SRMEM */
#if defined(DONGLEBUILD)
#include <d11_cfg.h>
#endif

/* mutex macros for thread safety */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	osl_ext_mutex_create(name, mutex)
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		osl_ext_mutex_delete(mutex)
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	osl_ext_mutex_acquire(mutex, msec)
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	osl_ext_mutex_release(mutex)
#else
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_DELETE(mutex)		OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)	OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)	OSL_EXT_SUCCESS
#endif
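
/*
 * Locking convention (illustrative sketch, not driver code): every exported
 * pool operation below brackets its critical section with the macros above,
 * so builds without HND_PKTPOOL_THREAD_SAFE compile the locking away to
 * OSL_EXT_SUCCESS. The pool pointer 'pktp' is assumed:
 *
 *	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
 *		return BCME_ERROR;
 *	...operate on pktp->freelist / pktp->avail...
 *	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
 *		return BCME_ERROR;
 */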

/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID	(0U)
#define PKTPOOLREG_RSVD_PTR	(POOLPTR(0xdeaddead))
#define PKTPOOLREG_FREE_PTR	(POOLPTR(NULL))

#define PKTPOOL_REGISTRY_SET(id, pp)	(pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp)	(pktpool_registry_cmp((id), (pp)))

/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id) \
	PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id) \
	(PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))

/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV() \
	PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD() \
	(PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))

/* Walk all un-reserved entries in registry */
#define PKTPOOL_REGISTRY_FOREACH(id) \
	for ((id) = 1U; (id) <= pktpools_max; (id)++)

enum pktpool_empty_cb_state {
	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
};

uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
/* Number of pktids currently reserved for pktpool usage; initialized with
 * the max pktids reserved for pktpool.
 * pktpool_init, pktpool_fill and pktpool_refill decrement it;
 * pktpool_reclaim, pktpool_empty and heap_pkt_release increment it.
 */
#ifdef DONGLEBUILD
uint32 total_pool_pktid_count = PKTID_POOL;
#else
uint32 total_pool_pktid_count = 0U;
#endif /* DONGLEBUILD */

#ifdef POOL_HEAP_RECONFIG
typedef struct pktpool_heap_cb_reg {
	pktpool_heap_cb_t fn;
	void *ctxt;
	uint32 flag;
} pktpool_heap_cb_reg_t;
#define PKTPOOL_MAX_HEAP_CB 2
pktpool_heap_cb_reg_t	pktpool_heap_cb_reg[PKTPOOL_MAX_HEAP_CB];
uint32 pktpool_heap_rel_active = 0U;

static void hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag);
static void hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag);
static int hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize);
static void hnd_pktpool_lbuf_free_cb(uint8 poolid);
static pktpool_heap_cb_reg_t *BCMRAMFN(hnd_pool_get_cb_registry)(void);
#endif /* POOL_HEAP_RECONFIG */

/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);
/** forward declaration */
static void pktpool_avail_notify(pktpool_t *pktp);

/** accessor functions required when ROMming this file, forced into RAM */

pktpool_t *
BCMPOSTTRAPRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}

static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}

static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}

/** Constructs a pool registry to serve a maximum of total_pools */
int
BCMATTACHFN(pktpool_attach)(osl_t *osh, uint32 total_pools)
{
	uint32 poolid;
	BCM_REFERENCE(osh);

	if (pktpools_max != 0U) {
		return BCME_ERROR;
	}

	ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);

	/* Initialize registry: reserve slot#0 and tag others as free */
	PKTPOOL_REGISTRY_RSV();		/* reserve slot#0 */

	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* tag all unreserved entries as free */
		PKTPOOL_REGISTRY_CLR(poolid);
	}

	pktpools_max = total_pools;

	return (int)pktpools_max;
}
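
/*
 * Usage sketch, mirroring hnd_pktpool_init() further below ('osh' assumed):
 * the registry must be constructed once before any pktpool_init() call, and
 * the return value on success is the pool count that was requested.
 *
 *	if (pktpool_attach(osh, PKTPOOL_MAXIMUM_ID) != PKTPOOL_MAXIMUM_ID) {
 *		return BCME_ERROR; // already attached, or registry unusable
 *	}
 */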

/** Destructs the pool registry. Ascertain all pools were first de-inited */
int
BCMATTACHFN(pktpool_dettach)(osl_t *osh)
{
	uint32 poolid;
	BCM_REFERENCE(osh);

	if (pktpools_max == 0U) {
		return BCME_OK;
	}

	/* Ascertain that no pools are still registered */
	ASSERT(PKTPOOL_REGISTRY_ISRSVD());	/* assert reserved slot */

	PKTPOOL_REGISTRY_FOREACH(poolid) {	/* ascertain all others are free */
		ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
	}

	pktpools_max = 0U;	/* restore boot state */

	return BCME_OK;
}

/** Registers a pool in a free slot; returns the registry slot index */
static int
BCMATTACHFN(pktpool_register)(pktpool_t * poolptr)
{
	uint32 poolid;

	if (pktpools_max == 0U) {
		return PKTPOOL_INVALID_ID;	/* registry has not yet been constructed */
	}

	ASSERT(pktpools_max != 0U);

	/* find an empty slot in pktpools_registry */
	PKTPOOL_REGISTRY_FOREACH(poolid) {
		if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
			PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr));	/* register pool */
			return (int)poolid;	/* return pool ID */
		}
	} /* FOREACH */

	return PKTPOOL_INVALID_ID;	/* error: registry is full */
}

/** Deregisters a pktpool, given the pool pointer; tag slot as free */
static int
BCMATTACHFN(pktpool_deregister)(pktpool_t * poolptr)
{
	uint32 poolid;

	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));

	poolid = POOLID(poolptr);
	ASSERT(poolid <= pktpools_max);

	/* Ascertain that a previously registered poolptr is being de-registered */
	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
	} else {
		ASSERT(0);
		return BCME_ERROR; /* mismatch in registry */
	}

	return BCME_OK;
}

/**
 * pktpool_init:
 * The caller provides a pktpool_t structure and specifies the number of
 * packets to be pre-filled into the pool (n_pkts).
 * pktpool_init first attempts to register the pool and fetch a unique poolid.
 * If registration fails, it is considered a BCME_ERROR, caused either by the
 * registry not having been pre-created (pktpool_attach) or by the registry
 * being full.
 * If registration succeeds, the requested number of packets is filled into
 * the pool as part of initialization. If there is not enough memory available
 * to service the request, BCME_NOMEM is returned along with the count of how
 * many packets were successfully allocated.
 * In dongle builds, prior to memory reclamation, one should limit the number
 * of packets allocated during pktpool_init and fill the pool up after the
 * reclaim stage.
 *
 * @param n_pkts         Number of packets to be pre-filled into the pool
 * @param max_pkt_bytes  The size of all packets in a pool must be the same, e.g. PKTBUFSZ.
 * @param type           e.g. 'lbuf_frag'
 */
int
BCMATTACHFN(pktpool_init)(osl_t *osh,
	pktpool_t *pktp,
	int *n_pkts,
	int max_pkt_bytes,
	bool istx,
	uint8 type,
	bool is_heap_pool,
	uint32 heap_pool_flag,
	uint16 min_backup_buf)
{
	int i, err = BCME_OK;
	int pktplen;
	uint8 pktp_id;

	ASSERT(pktp != NULL);
	ASSERT(osh != NULL);
	ASSERT(n_pkts != NULL);

	pktplen = *n_pkts;

	bzero(pktp, sizeof(pktpool_t));

	/* assign a unique pktpool id */
	if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
		return BCME_ERROR;
	}
	POOLSETID(pktp, pktp_id);

	pktp->inited = TRUE;
	pktp->istx = istx ? TRUE : FALSE;
	pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
	pktp->type = type;

#ifdef POOL_HEAP_RECONFIG
	pktp->poolheap_flag = heap_pool_flag;
	pktp->poolheap_count = 0;
	pktp->min_backup_buf = min_backup_buf;
	if (is_heap_pool) {
		if (rte_freelist_mgr_register(&pktp->mem_handle,
			hnd_pktpool_heap_get_cb,
			lb_get_pktalloclen(type, max_pkt_bytes),
			pktp) != BCME_OK) {
			return BCME_ERROR;
		}
	}
	pktp->is_heap_pool = is_heap_pool;
#endif
	if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	pktp->maxlen = PKTPOOL_LEN_MAX;
	pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);

	for (i = 0; i < pktplen; i++) {
		void *p;
#ifdef _RTE_
		/* For rte builds, use PKTALLOC rather than PKTGET to
		 * avoid the same pkts being dequeued from and enqueued to the
		 * pool when allocation fails.
		 */
		p = PKTALLOC(osh, max_pkt_bytes, type);
#else
		p = PKTGET(osh, max_pkt_bytes, TRUE);
#endif

		if (p == NULL) {
			/* Not able to allocate all requested pkts
			 * so just return what was actually allocated
			 * We can add to the pool later
			 */
			if (pktp->freelist == NULL) /* pktpool free list is empty */
				err = BCME_NOMEM;

			goto exit;
		}

		PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */

		PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
		pktp->freelist = p;

		pktp->avail++;

		ASSERT(total_pool_pktid_count > 0);
		total_pool_pktid_count--;

#ifdef BCMDBG_POOL
		pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif
	}

exit:
	pktp->n_pkts = pktp->avail;

	*n_pkts = pktp->n_pkts; /* number of packets managed by pool */
	return err;
} /* pktpool_init */
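
/*
 * Usage sketch, modeled on hnd_pktpool_init() below ('osh' assumed). Note
 * that n is both input (requested prefill) and output (packets actually
 * allocated), and that BCME_NOMEM merely indicates a partial prefill:
 *
 *	int n = 1; // small prefill; grow after reclaim via pktpool_fill()
 *	pktpool_t *pool = MALLOCZ(osh, sizeof(pktpool_t));
 *
 *	if (pool == NULL ||
 *	    pktpool_init(osh, pool, &n, PKTBUFSZ, FALSE, lbuf_basic,
 *	                 FALSE, 0, 0) != BCME_OK) {
 *		// handle failure; n holds the number actually allocated
 *	}
 *	pktpool_setmaxlen(pool, SHARED_POOL_LEN);
 */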

/**
 * pktpool_deinit:
 * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
 * An assert is in place to ensure that there are no packets still lingering
 * around. Packets freed to a pool after the deinit will cause a memory
 * corruption as the pktpool_t structure no longer exists.
 */
int
BCMATTACHFN(pktpool_deinit)(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->n_pkts == 0);

	return 0;
}

int
pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
{
	void *p;
	int err = 0;
	int n_pkts, psize, maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

#ifdef BCMRXDATAPOOL
	ASSERT((pktp->max_pkt_bytes != 0) || (pktp->type == lbuf_rxfrag));
#else
	ASSERT(pktp->max_pkt_bytes != 0);
#endif /* BCMRXDATAPOOL */

	maxlen = pktp->maxlen;
	psize = minimal ? (maxlen >> 2) : maxlen;
	n_pkts = (int)pktp->n_pkts;
#ifdef POOL_HEAP_RECONFIG
	/*
	 * Consider the packets released to freelist mgr also
	 * as part of pool size
	 */
	n_pkts += pktp->is_heap_pool ?
		pktp->poolheap_count : 0;
#endif
	for (; n_pkts < psize; n_pkts++) {

#ifdef _RTE_
		/* For rte builds, use PKTALLOC rather than PKTGET to avoid the
		 * same pkts being dequeued from and enqueued to the pool when
		 * allocation fails. All pkts in a pool have the same length.
		 */
		p = PKTALLOC(osh, pktp->max_pkt_bytes, pktp->type);
#else
		p = PKTGET(osh, pktp->max_pkt_bytes, TRUE); /* PKTGET takes a byte length, not a pkt count */
#endif

		if (p == NULL) {
			err = BCME_NOMEM;
			break;
		}

		if (pktpool_add(pktp, p) != BCME_OK) {
			PKTFREE(osh, p, FALSE);
			err = BCME_ERROR;
			break;
		}
		ASSERT(total_pool_pktid_count > 0);
		total_pool_pktid_count--;
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (pktp->cbcnt) {
		if (pktp->empty == FALSE)
			pktpool_avail_notify(pktp);
	}

	return err;
}
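
/*
 * Sketch: a 'minimal' fill only tops the pool up to a quarter of maxlen
 * (psize = maxlen >> 2 above), so a caller can prefill conservatively
 * before memory reclaim and top up fully afterwards:
 *
 *	pktpool_fill(osh, pktp, TRUE);  // fill to maxlen/4
 *	...memory reclaim frees up heap...
 *	pktpool_fill(osh, pktp, FALSE); // fill all the way to maxlen
 */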

#ifdef BCMPOOLRECLAIM
/* API to release pkts from the pool back to the heap, without deinitializing it
 */
uint16
pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt, uint8 action)
{
	uint16 freed = 0;

	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;
	void *rem_list_head = NULL;
	void *rem_list_tail = NULL;
	bool dont_free = FALSE;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return freed;
	}

	if (pktp->avail < free_cnt) {
		free_cnt = pktp->avail;
	}

	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If pool is shared rx frag pool, use call back fn to reclaim host address
		 * and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);

		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;

	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
		 * associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while ((pktp->freelist != NULL) && (free_cnt)) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		dont_free = FALSE;

		if (action == FREE_ALL_FRAG_PKTS) {
			/* Free lbufs which are marked as frag_free_mem */
			if (!PKTISFRMFRAG(p)) {
				dont_free = TRUE;
			}
		}

		if (dont_free) {
			if (rem_list_head == NULL) {
				rem_list_head = p;
			} else {
				PKTSETFREELIST(rem_list_tail, p);
			}
			rem_list_tail = p;
			continue;
		}
		if (cb != NULL) {
			if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		pktp->avail--;
		pktp->n_pkts--;

		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		free_cnt--;
	}

	if (rem_list_head) {
		PKTSETFREELIST(rem_list_tail, pktp->freelist);
		pktp->freelist = rem_list_head;
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return freed;
	}

	return freed;
}
#endif /* #ifdef BCMPOOLRECLAIM */

/* API to empty all pkts from the pool without deinitializing it.
 * NOTE: the caller must ensure that all pkts have been returned to the pool
 * before calling this; otherwise they are leaked!
 */
int
pktpool_empty(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

#ifdef BCMDBG_POOL
	{
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif

	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0);

	pktp->n_pkts -= freed;

	ASSERT(pktp->n_pkts == 0);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
BCMPOSTTRAPFN(pktpool_avail)(pktpool_t *pktpool)
{
	int avail = pktpool->avail;

	if (avail == 0) {
		pktpool_emptycb_disable(pktpool, FALSE);
	}

	return avail;
}

static void *
BCMPOSTTRAPFASTPATH(pktpool_deq)(pktpool_t *pktp)
{
	void *p = NULL;

	if (pktp->avail == 0)
		return NULL;

	ASSERT_FP(pktp->freelist != NULL);

	p = pktp->freelist; /* dequeue packet from head of pktpool free list */
	pktp->freelist = PKTFREELIST(p); /* free list points to next packet */

#if defined(DONGLEBUILD) && defined(SRMEM)
	if (SRMEM_ENAB()) {
		PKTSRMEM_INC_INUSE(p);
	}
#endif /* DONGLEBUILD && SRMEM */

	PKTSETFREELIST(p, NULL);

	pktp->avail--;

	return p;
}

static void
BCMPOSTTRAPFASTPATH(pktpool_enq)(pktpool_t *pktp, void *p)
{
	ASSERT_FP(p != NULL);

	PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
	pktp->freelist = p; /* free list points to newly inserted packet */

#if defined(DONGLEBUILD) && defined(SRMEM)
	if (SRMEM_ENAB()) {
		PKTSRMEM_DEC_INUSE(p);
	}
#endif /* DONGLEBUILD && SRMEM */

	pktp->avail++;
	ASSERT_FP(pktp->avail <= pktp->n_pkts);
}
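
/*
 * The free list is a LIFO singly-linked list threaded through the packets
 * themselves via the lbuf accessor macros, so both operations are O(1):
 *
 *	freelist -> pkt_A -> pkt_B -> NULL
 *	pktpool_enq(pktp, p): link p to the old head, head = p, avail++
 *	pktpool_deq(pktp):    p = head, head = PKTFREELIST(p), avail--
 */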

/** utility for registering host addr fill function called from pciedev */
int
BCMATTACHFN(pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	ASSERT(pktp->cbext.cb == NULL);
	pktp->cbext.cb = cb;
	pktp->cbext.arg = arg;
	return 0;
}

int
BCMATTACHFN(pktpool_rxcplid_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	if (pktp == NULL)
		return BCME_ERROR;
	ASSERT(pktp->rxcplidfn.cb == NULL);
	pktp->rxcplidfn.cb = cb;
	pktp->rxcplidfn.arg = arg;
	return 0;
}

/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
void
pktpool_invoke_dmarxfill(pktpool_t *pktp)
{
	ASSERT(pktp->dmarxfill.cb);
	ASSERT(pktp->dmarxfill.arg);

	if (pktp->dmarxfill.cb)
		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
}

/** Registers callback functions for split rx mode */
int
BCMATTACHFN(pkpool_haddr_avail_register_cb)(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{

	ASSERT(cb != NULL);

	pktp->dmarxfill.cb = cb;
	pktp->dmarxfill.arg = arg;

	return 0;
}

/**
 * Registers callback functions.
 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
 */
int
pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt++;
			goto done;
		}
	}

	i = pktp->cbcnt;
	if (i == PKTPOOL_CB_MAX_AVL) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->cbs[i].cb == NULL);
	pktp->cbs[i].cb = cb;
	pktp->cbs[i].arg = arg;
	pktp->cbs[i].refcnt++;
	pktp->cbcnt++;

	/* force enable empty callback */
	pktpool_emptycb_disable(pktp, FALSE);
done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
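
/*
 * Registration is reference counted: registering the same (cb, arg) pair
 * again bumps refcnt instead of consuming another of the PKTPOOL_CB_MAX_AVL
 * slots, and pktpool_avail_deregister() must then be called as many times.
 * Sketch ('my_cb'/'my_ctx' are hypothetical):
 *
 *	pktpool_avail_register(pktp, my_cb, my_ctx);   // refcnt = 1
 *	pktpool_avail_register(pktp, my_cb, my_ctx);   // same slot, refcnt = 2
 *	pktpool_avail_deregister(pktp, my_cb, my_ctx); // refcnt = 1, still registered
 */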

/* No BCMATTACHFN as it is used in a non-attach function */
int
pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i, k;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	ASSERT(cb != NULL);

	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt--;
			if (pktp->cbs[i].refcnt) {
				/* Still there are references to this callback */
				goto done;
			}
			/* Move the remaining callbacks down to fill the hole */
			for (k = i+1; k < pktp->cbcnt; i++, k++) {
				pktp->cbs[i].cb = pktp->cbs[k].cb;
				pktp->cbs[i].arg = pktp->cbs[k].arg;
				pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
			}

			/* reset the last callback */
			pktp->cbs[i].cb = NULL;
			pktp->cbs[i].arg = NULL;
			pktp->cbs[i].refcnt = 0;

			pktp->cbcnt--;
			goto done;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	return err;
}

/** Registers callback functions */
int
BCMATTACHFN(pktpool_empty_register)(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb != NULL);

	i = pktp->ecbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->ecbs[i].cb == NULL);
	pktp->ecbs[i].cb = cb;
	pktp->ecbs[i].arg = arg;
	pktp->ecbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

/** Calls registered callback functions */
static int
BCMPOSTTRAPFN(pktpool_empty_notify)(pktpool_t *pktp)
{
	int i;

	pktp->empty = TRUE;
	for (i = 0; i < pktp->ecbcnt; i++) {
		ASSERT(pktp->ecbs[i].cb != NULL);
		pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
	}
	pktp->empty = FALSE;

	return 0;
}

#ifdef BCMDBG_POOL
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb);

	i = pktp->dbg_cbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->dbg_cbs[i].cb == NULL);
	pktp->dbg_cbs[i].cb = cb;
	pktp->dbg_cbs[i].arg = arg;
	pktp->dbg_cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

int pktpool_dbg_notify(pktpool_t *pktp);

int
pktpool_dbg_notify(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	for (i = 0; i < pktp->dbg_cbcnt; i++) {
		ASSERT(pktp->dbg_cbs[i].cb);
		pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_dbg_dump(pktpool_t *pktp)
{
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p);
		printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
			pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
{
	int i;
	int state;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	bzero(stats, sizeof(pktpool_stats_t));
	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		state = PKTPOOLSTATE(pktp->dbg_q[i].p);
		switch (state) {
		case POOL_TXENQ:
			stats->enq++; break;
		case POOL_TXDH:
			stats->txdh++; break;
		case POOL_TXD11:
			stats->txd11++; break;
		case POOL_RXDH:
			stats->rxdh++; break;
		case POOL_RXD11:
			stats->rxd11++; break;
		case POOL_RXFILL:
			stats->rxfill++; break;
		case POOL_IDLE:
			stats->idle++; break;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_start_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			pktp->dbg_q[i].cycles = cycles;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int pktpool_stop_trigger(pktpool_t *pktp, void *p);

int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0;
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
#endif /* BCMDBG_POOL */

int
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
	BCM_REFERENCE(osh);
	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->availcb_excl = NULL;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}

int
pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
{
	int i;
	int err;
	BCM_REFERENCE(osh);

	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(pktp->availcb_excl == NULL);
	for (i = 0; i < pktp->cbcnt; i++) {
		if (cb == pktp->cbs[i].cb) {
			pktp->availcb_excl = &pktp->cbs[i];
			break;
		}
	}

	if (pktp->availcb_excl == NULL)
		err = BCME_ERROR;
	else
		err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
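
/*
 * Sketch: a callback that was previously registered with
 * pktpool_avail_register() can temporarily claim all availability
 * notifications, then hand them back ('my_cb' is hypothetical):
 *
 *	pktpool_avail_notify_exclusive(osh, pktp, my_cb); // only my_cb fires
 *	...
 *	pktpool_avail_notify_normal(osh, pktp);           // resume normal delivery
 */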

static void
BCMPOSTTRAPFN(pktpool_avail_notify)(pktpool_t *pktp)
{
	int i, k, idx;

	ASSERT(pktp);
	pktpool_emptycb_disable(pktp, TRUE);

	if (pktp->availcb_excl != NULL) {
		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
		return;
	}

	k = pktp->cbcnt - 1;
	for (i = 0; i < pktp->cbcnt; i++) {
		/* Callbacks are disabled at entry to this function. If, say,
		 * avail is 5 and the first callback consumes exactly 5 pkts
		 * due to the dma rxpost setting, then with an avail check the
		 * remaining callbacks would never be notified.
		 * So call all cbs even if pktp->avail is zero, giving each cb
		 * the opportunity to re-enable callbacks if its operation is
		 * still in progress / not completed.
		 */
		if (pktp->cbtoggle)
			idx = i;
		else
			idx = k--;

		ASSERT(pktp->cbs[idx].cb != NULL);
		pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
	}

	/* Alternate between filling from head or tail
	 */
	pktp->cbtoggle ^= 1;

	return;
}

#ifdef APP_RX
/* Update freelist and avail count for a given packet pool */
void
BCMFASTPATH(pktpool_update_freelist)(pktpool_t *pktp, void *p, uint pkts_consumed)
{
	ASSERT_FP(pktp->avail >= pkts_consumed);

	pktp->freelist = p;
	pktp->avail -= pkts_consumed;
}
#endif /* APP_RX */

/** Gets an empty packet from the caller provided pool */
void *
BCMPOSTTRAPFASTPATH(pktpool_get_ext)(pktpool_t *pktp, uint8 type, uint *pktcnt)
{
	void *p = NULL;
	uint pkts_requested = 1;
#if defined(DONGLEBUILD)
	uint pkts_avail;
	bool rxcpl = (pktp->rxcplidfn.cb != NULL) ? TRUE : FALSE;
#endif /* DONGLEBUILD */

	if (pktcnt) {
		pkts_requested = *pktcnt;
		if (pkts_requested == 0) {
			goto done;
		}
	}

#if defined(DONGLEBUILD)
	pkts_avail = pkts_requested;
#endif /* DONGLEBUILD */

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return NULL;

	/* If there are fewer packets in the pool than requested, call
	 * pktpool_empty_notify() to reclaim more pkts.
	 */
	if (pktp->avail < pkts_requested) {
		/* Notify and try to reclaim tx pkts */
		if (pktp->ecbcnt) {
			pktpool_empty_notify(pktp);
		}

		if (pktp->avail < pkts_requested) {
			pktpool_emptycb_disable(pktp, FALSE);
			if (pktp->avail == 0) {
				goto done;
			}
		}
	}

#ifdef APP_RX
	if (pktcnt) {
		p = pktp->freelist;
		if (pktp->avail < pkts_requested) {
			pkts_avail = pktp->avail;
		}

		/* For rx frags in APP, we need to return only the head of freelist and
		 * the caller operates on it and updates the avail count and freelist pointer
		 * using pktpool_update_freelist().
		 */
		if (BCMSPLITRX_ENAB() && ((type == lbuf_rxfrag) || (type == lbuf_rxdata))) {
			*pktcnt = pkts_avail;
			goto done;
		}
	} else
#endif /* APP_RX */
	{
		ASSERT_FP(pkts_requested == 1);
		p = pktpool_deq(pktp);
	}

	ASSERT_FP(p);

#if defined(DONGLEBUILD)
#ifndef APP_RX
	if (BCMSPLITRX_ENAB() && (type == lbuf_rxfrag)) {
		/* If pool is shared rx pool, use call back fn to populate host address.
		 * In case of APP, the callback may use fewer packets than what we
		 * passed to it because of a resource crunch; the exact number of
		 * packets used by the callback is returned through (*pktcnt) and
		 * the pktpool freelist head is updated accordingly.
		 */
		ASSERT_FP(pktp->cbext.cb != NULL);
		if (pktp->cbext.cb(pktp, pktp->cbext.arg, p, rxcpl, &pkts_avail)) {
			pktpool_enq(pktp, p);
			p = NULL;
		}
	}
#endif /* APP_RX */

	if ((type == lbuf_basic) && rxcpl) {
		/* If pool is shared rx pool, use call back fn to populate Rx cpl ID */
		ASSERT_FP(pktp->rxcplidfn.cb != NULL);
		/* If rxcplblock is allocated */
		if (pktp->rxcplidfn.cb(pktp, pktp->rxcplidfn.arg, p, TRUE, NULL)) {
			pktpool_enq(pktp, p);
			p = NULL;
		}
	}
#endif /* DONGLEBUILD */

done:
	if ((pktp->avail == 0) && (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
		pktp->emptycb_disable = EMPTYCB_DISABLED;
	}
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return NULL;

	return p;
}
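
/*
 * Sketch of the two calling conventions ('pktp' assumed): a single-packet
 * get passes pktcnt == NULL; the APP_RX rxfrag/rxdata path instead requests
 * a batch, receives the freelist head, and later reconciles what it consumed
 * via pktpool_update_freelist():
 *
 *	void *p = pktpool_get_ext(pktp, lbuf_basic, NULL);     // one pkt or NULL
 *
 *	uint cnt = n_wanted;                                   // hypothetical count
 *	void *head = pktpool_get_ext(pktp, lbuf_rxfrag, &cnt); // cnt <= n_wanted
 */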

void
BCMFASTPATH(pktpool_nfree)(pktpool_t *pktp, void *head, void *tail, uint count)
{
#ifdef BCMRXDATAPOOL
	void *_head = head;
#endif /* BCMRXDATAPOOL */

	if (count > 1) {
		pktp->avail += (count - 1);

#ifdef BCMRXDATAPOOL
		while (--count) {
			_head = PKTLINK(_head);
			ASSERT_FP(_head);
			pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, _head));
		}
#endif /* BCMRXDATAPOOL */

		PKTSETFREELIST(tail, pktp->freelist);
		pktp->freelist = PKTLINK(head);
		PKTSETLINK(head, NULL);
	}
	pktpool_free(pktp, head);
}
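
/*
 * Sketch: pktpool_nfree() returns a PKTLINK()-chained run of 'count' packets
 * in one shot -- the tail is spliced directly onto the freelist and only the
 * head goes through pktpool_free(), and hence through the avail callbacks
 * ('head'/'tail' are assumed to bound the chain):
 *
 *	pktpool_nfree(pktp, head, tail, count);
 */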

void
BCMPOSTTRAPFASTPATH(pktpool_free)(pktpool_t *pktp, void *p)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return;

	ASSERT_FP(p != NULL);
#ifdef BCMDBG_POOL
	/* pktpool_stop_trigger(pktp, p); */
#endif

#ifdef BCMRXDATAPOOL
	/* Free rx data buffer to rx data buffer pool */
	if (PKT_IS_RX_PKT(OSH_NULL, p)) {
		pktpool_t *_pktp = pktpool_shared_rxdata;
		if (PKTISRXFRAG(OSH_NULL, p)) {
			_pktp->cbext.cb(_pktp, _pktp->cbext.arg, p, REMOVE_RXCPLID, NULL);
			PKTRESETRXFRAG(OSH_NULL, p);
		}
		pktpool_enq(pktpool_shared_rxdata, PKTDATA(OSH_NULL, p));
	}
#endif /* BCMRXDATAPOOL */

	pktpool_enq(pktp, p);

	/**
	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
	 * If any avail callback functions are registered, send a notification
	 * that a new packet is available in the pool.
	 */
	if (pktp->cbcnt) {
		/* To use cpu cycles more efficiently, callbacks can be temporarily
		 * disabled. This allows feeding on a burst basis as opposed to an
		 * inefficient per-packet basis.
		 */
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/**
			 * If the call originated from pktpool_empty_notify, the just freed packet
			 * is needed in pktpool_get.
			 * Therefore don't call pktpool_avail_notify.
			 */
			if (pktp->empty == FALSE)
				pktpool_avail_notify(pktp);
		} else {
			/**
			 * The callback is temporarily disabled, log that a packet has been freed.
			 */
			pktp->emptycb_disable = EMPTYCB_SKIPPED;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return;
}

/** Adds a caller provided (empty) packet to the caller provided pool */
int
pktpool_add(pktpool_t *pktp, void *p)
{
	int err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(p != NULL);

	if (pktp->n_pkts == pktp->maxlen) {
		err = BCME_RANGE;
		goto done;
	}

	/* pkts in pool have same length */
	ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
	PKTSETPOOL(OSH_NULL, p, TRUE, pktp);

	pktp->n_pkts++;
	pktpool_enq(pktp, p);

#ifdef BCMDBG_POOL
	pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}

/**
 * Force pktpool_setmaxlen () into RAM as it uses a constant
 * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
 */
int
BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	if (maxlen > PKTPOOL_LEN_MAX)
		maxlen = PKTPOOL_LEN_MAX;

	/* if pool is already beyond maxlen, then just cap it
	 * since we currently do not reduce the pool len
	 * already allocated
	 */
	pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return pktp->maxlen;
}
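
/*
 * Sketch: the return value is the effective maxlen, which may differ from
 * the request -- it is clipped to PKTPOOL_LEN_MAX and never set below the
 * number of packets already allocated ('requested_len' is hypothetical):
 *
 *	int eff = pktpool_setmaxlen(pktp, requested_len);
 *	if (eff != requested_len) {
 *		// request was clipped, or the pool was already larger
 *	}
 */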

void
BCMPOSTTRAPFN(pktpool_emptycb_disable)(pktpool_t *pktp, bool disable)
{
	bool notify = FALSE;
	ASSERT(pktp);

	/**
	 * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
	 * If callback is going to be re-enabled, check if any packet got
	 * freed and added back to the pool while callback was disabled.
	 * When this is the case do the callback now, provided that callback functions
	 * are registered and this call did not originate from pktpool_empty_notify.
	 */
	if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
		(pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
		notify = TRUE;
	}

	/* Enable or temporarily disable callback when packet becomes available. */
	if (disable) {
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/* mark disabled only if enabled.
			 * if state is EMPTYCB_SKIPPED, it means already
			 * disabled and some pkts are freed. So don't lose the state
			 * of skipped to ensure calling pktpool_avail_notify().
			 */
			pktp->emptycb_disable = EMPTYCB_DISABLED;
		}
	} else {
		pktp->emptycb_disable = EMPTYCB_ENABLED;
	}
	if (notify) {
		/* pktpool_emptycb_disable() is called from pktpool_avail_notify() and
		 * pktp->cbs. To have the result of most recent call, notify after
		 * emptycb_disable is modified.
		 * This change also prevents any recursive calls of pktpool_avail_notify()
		 * from pktp->cbs if pktpool_emptycb_disable() is called from them.
		 */
		pktpool_avail_notify(pktp);
	}
}
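
/*
 * Burst-free sketch: disable availability callbacks around a run of frees,
 * then re-enable; any free that happened meanwhile is recorded as
 * EMPTYCB_SKIPPED and re-enabling triggers a single deferred
 * pktpool_avail_notify() ('pkts'/'n' are hypothetical):
 *
 *	pktpool_emptycb_disable(pktp, TRUE);
 *	for (i = 0; i < n; i++)
 *		pktpool_free(pktp, pkts[i]); // no per-pkt notify
 *	pktpool_emptycb_disable(pktp, FALSE); // one deferred notify
 */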

bool
pktpool_emptycb_disabled(pktpool_t *pktp)
{
	ASSERT(pktp);
	return pktp->emptycb_disable != EMPTYCB_ENABLED;
}

#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
pktpool_t *pktpool_shared_lfrag = NULL;
#ifdef BCMRESVFRAGPOOL
pktpool_t *pktpool_resv_lfrag = NULL;
struct resv_info *resv_pool_info = NULL;
#endif /* BCMRESVFRAGPOOL */
#endif /* BCMFRAGPOOL */

#ifdef BCMALFRAGPOOL
pktpool_t *pktpool_shared_alfrag = NULL;
pktpool_t *pktpool_shared_alfrag_data = NULL;
#endif /* BCMALFRAGPOOL */

pktpool_t *pktpool_shared_rxlfrag = NULL;

/* Rx data pool w/o rxfrag structure */
pktpool_t *pktpool_shared_rxdata = NULL;

static osl_t *pktpool_osh = NULL;

/**
 * Initializes several packet pools and allocates packets within those pools.
 */
int
BCMATTACHFN(hnd_pktpool_init)(osl_t *osh)
{
	int err = BCME_OK;
	int n, pktsz;
	bool is_heap_pool;

	BCM_REFERENCE(pktsz);
	BCM_REFERENCE(is_heap_pool);

	/* Construct a packet pool registry before initializing packet pools */
	n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
	if (n != PKTPOOL_MAXIMUM_ID) {
		ASSERT(0);
		err = BCME_ERROR;
		goto error;
	}

	pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_lfrag == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}

#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
	resv_pool_info = hnd_resv_pool_alloc(osh);
	if (resv_pool_info == NULL) {
		err = BCME_NOMEM;
		ASSERT(0);
		goto error;
	}
	pktpool_resv_lfrag = resv_pool_info->pktp;
	if (pktpool_resv_lfrag == NULL) {
		err = BCME_ERROR;
		ASSERT(0);
		goto error;
	}
#endif /* RESVFRAGPOOL */
#endif /* FRAGPOOL */

#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
	pktpool_shared_alfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_alfrag == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}

	pktpool_shared_alfrag_data = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_alfrag_data == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}
#endif /* BCMALFRAGPOOL */

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_rxlfrag == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}
#endif

#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
	pktpool_shared_rxdata = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_rxdata == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}
#endif

	/*
	 * At this early stage, there's not enough memory to allocate all
	 * requested pkts in the shared pool. Need to add to the pool
	 * after reclaim
	 *
	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
	 *
	 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
	 * registry is not initialized or the registry is depleted.
	 *
	 * A BCME_NOMEM error only indicates that the requested number of packets
	 * were not filled into the pool.
	 */
	n = 1;
	MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
	if ((err = pktpool_init(osh, pktpool_shared,
			&n, PKTBUFSZ, FALSE, lbuf_basic, FALSE, 0, 0)) != BCME_OK) {
		ASSERT(0);
		goto error;
	}
	pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	n = 1;
#if (((defined(EVENTLOG_D3_PRESERVE) && !defined(EVENTLOG_D3_PRESERVE_DISABLED)) || \
	defined(BCMPOOLRECLAIM)))
	is_heap_pool = TRUE;
#else
	is_heap_pool = FALSE;
#endif /* ((EVENTLOG_D3_PRESERVE && !EVENTLOG_D3_PRESERVE_DISABLED) || BCMPOOLRECLAIM) */

	if ((err = pktpool_init(osh, pktpool_shared_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag,
			is_heap_pool, POOL_HEAP_FLAG_D3, SHARED_FRAG_POOL_LEN >> 3)) !=
			BCME_OK) {
		ASSERT(0);
		goto error;
	}
	pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);

#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
	n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
#ifdef RESV_POOL_HEAP
	is_heap_pool = TRUE;
#else
	is_heap_pool = FALSE;
#endif /* RESV_POOL_HEAP */

	if ((err = pktpool_init(osh, pktpool_resv_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag,
			is_heap_pool, POOL_HEAP_FLAG_RSRVPOOL, 0)) != BCME_OK) {
		ASSERT(0);
		goto error;
	}
	pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
#endif /* RESVFRAGPOOL */
#endif /* BCMFRAGPOOL */

#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
	n = 1;
	is_heap_pool = FALSE;

	if ((err = pktpool_init(osh, pktpool_shared_alfrag, &n, PKTFRAGSZ, TRUE, lbuf_alfrag,
			is_heap_pool, 0, SHARED_ALFRAG_POOL_LEN >> 3)) != BCME_OK) {
		ASSERT(0);
		goto error;
	}
	pktpool_setmaxlen(pktpool_shared_alfrag, SHARED_ALFRAG_POOL_LEN);

	n = 0;
	if ((err = pktpool_init(osh, pktpool_shared_alfrag_data, &n, TXPKTALFRAG_DATA_BUFSZ, TRUE,
			lbuf_alfrag_data, FALSE, 0, SHARED_ALFRAG_DATA_POOL_LEN >> 3)) != BCME_OK) {
		ASSERT(0);
		goto error;
	}
	pktpool_setmaxlen(pktpool_shared_alfrag_data, SHARED_ALFRAG_DATA_POOL_LEN);

#endif /* BCMALFRAGPOOL */

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
	n = 1;
	if ((err = pktpool_init(osh, pktpool_shared_rxdata, &n, RXPKTFRAGDATASZ, TRUE, lbuf_rxdata,
			FALSE, 0, 0)) != BCME_OK) {
		ASSERT(0);
		goto error;
	}
	pktpool_setmaxlen(pktpool_shared_rxdata, SHARED_RXDATA_POOL_LEN);

	pktsz = 0;
#else
	pktsz = RXPKTFRAGDATASZ;
#endif /* defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED) */

#ifdef RESV_POOL_HEAP
	is_heap_pool = BCMPOOLRECLAIM_ENAB() ? TRUE : FALSE;
#else
	is_heap_pool = FALSE;
#endif /* RESV_POOL_HEAP */

	n = 1;
	if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, &n, pktsz, TRUE, lbuf_rxfrag,
			is_heap_pool, POOL_HEAP_FLAG_D3, 0)) != BCME_OK) {
		ASSERT(0);
		goto error;
	}

	pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
#endif /* defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED) */

#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
	/* Attach poolreorg module */
	if ((frwd_poolreorg_info = poolreorg_attach(osh,
#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
			pktpool_shared_lfrag,
#else
			NULL,
#endif /* defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED) */
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
			pktpool_shared_rxlfrag,
#else
			NULL,
#endif /* BCMRXFRAGPOOL */
			pktpool_shared)) == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error;
	}
#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */

	pktpool_osh = osh;
	MALLOC_CLEAR_NOPERSIST(osh);

#ifdef POOL_HEAP_RECONFIG
	lbuf_free_cb_set(hnd_pktpool_lbuf_free_cb);
#endif

	return BCME_OK;

error:
	hnd_pktpool_deinit(osh);

	return err;
} /* hnd_pktpool_init */

void
BCMATTACHFN(hnd_pktpool_deinit)(osl_t *osh)
{
#if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
	if (frwd_poolreorg_info != NULL) {
		poolreorg_detach(frwd_poolreorg_info);
	}
#endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	if (pktpool_shared_rxlfrag != NULL) {
		if (pktpool_shared_rxlfrag->inited) {
			pktpool_deinit(osh, pktpool_shared_rxlfrag);
		}

		hnd_free(pktpool_shared_rxlfrag);
		pktpool_shared_rxlfrag = (pktpool_t *)NULL;
	}
#endif

#if defined(BCMRXDATAPOOL) && !defined(BCMRXDATAPOOL_DISABLED)
	if (pktpool_shared_rxdata != NULL) {
		if (pktpool_shared_rxdata->inited) {
			pktpool_deinit(osh, pktpool_shared_rxdata);
		}

		hnd_free(pktpool_shared_rxdata);
		pktpool_shared_rxdata = (pktpool_t *)NULL;
	}
#endif

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	if (pktpool_shared_lfrag != NULL) {
		if (pktpool_shared_lfrag->inited) {
			pktpool_deinit(osh, pktpool_shared_lfrag);
		}
		hnd_free(pktpool_shared_lfrag);
		pktpool_shared_lfrag = (pktpool_t *)NULL;
	}
#endif /* BCMFRAGPOOL */

#if defined(BCMALFRAGPOOL) && !defined(BCMALFRAGPOOL_DISABLED)
	if (pktpool_shared_alfrag != NULL) {
		if (pktpool_shared_alfrag->inited) {
			pktpool_deinit(osh, pktpool_shared_alfrag);
		}
		hnd_free(pktpool_shared_alfrag);
		pktpool_shared_alfrag = (pktpool_t *)NULL;
	}

	if (pktpool_shared_alfrag_data != NULL) {
		if (pktpool_shared_alfrag_data->inited) {
			pktpool_deinit(osh, pktpool_shared_alfrag_data);
		}

		hnd_free(pktpool_shared_alfrag_data);
		pktpool_shared_alfrag_data = (pktpool_t *)NULL;
	}
#endif /* BCMALFRAGPOOL */

#if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
	if (resv_pool_info != NULL) {
		if (pktpool_resv_lfrag != NULL) {
			pktpool_resv_lfrag = NULL;
		}
		hnd_free(resv_pool_info);
	}
#endif /* RESVFRAGPOOL */

	if (pktpool_shared != NULL) {
		if (pktpool_shared->inited) {
			pktpool_deinit(osh, pktpool_shared);
		}

		hnd_free(pktpool_shared);
		pktpool_shared = (pktpool_t *)NULL;
	}

	pktpool_dettach(osh);

	MALLOC_CLEAR_NOPERSIST(osh);
}

/** is called at each 'wl up' */
int
hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
{
	return (pktpool_fill(pktpool_osh, pktpool, minimal));
}

/** refills pktpools after reclaim, is called once */
void
hnd_pktpool_refill(bool minimal)
{
	if (POOL_ENAB(pktpool_shared)) {
#if defined(SRMEM)
		if (SRMEM_ENAB()) {
			int maxlen = pktpool_max_pkts(pktpool_shared);
			int n_pkts = pktpool_tot_pkts(pktpool_shared);

			for (; n_pkts < maxlen; n_pkts++) {
				void *p;
				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
					break;
				pktpool_add(pktpool_shared, p);
			}
		}
#endif /* SRMEM */
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
	/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */

	/* alfragpool reclaim */
#ifdef BCMALFRAGPOOL
	if (POOL_ENAB(pktpool_shared_alfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_alfrag, minimal);
	}

	if (POOL_ENAB(pktpool_shared_alfrag_data)) {
		pktpool_fill(pktpool_osh, pktpool_shared_alfrag_data, minimal);
	}
#endif /* BCMALFRAGPOOL */

	/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif

#ifdef BCMRXDATAPOOL
	if (POOL_ENAB(pktpool_shared_rxdata)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxdata, minimal);
	}
#endif

#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
	if (POOL_ENAB(pktpool_resv_lfrag)) {
		int resv_size = (pktpool_resv_lfrag->max_pkt_bytes + LBUFFRAGSZ) *
			pktpool_resv_lfrag->maxlen;
		hnd_resv_pool_init(resv_pool_info, resv_size);
		hnd_resv_pool_enable(resv_pool_info);
	}
#endif /* BCMRESVFRAGPOOL */
}

#ifdef POOL_HEAP_RECONFIG
#define hnd_pktpool_release_active_set(pktp)	(pktpool_heap_rel_active |= (1 << pktp->id))
#define hnd_pktpool_release_active_reset(pktp)	(pktpool_heap_rel_active &= ~(1 << pktp->id))
/* Function enable/disable heap pool usage */

void
hnd_pktpool_heap_handle(osl_t *osh, uint32 flag, bool enable)
{
	int i = 0;
	pktpool_t *pktp;
	/*
	 * Loop through all the registered pktpools.
	 * Trigger retrieval of pkts from the heap back to the pool if no
	 * flags are active.
	 */
	for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
		if ((pktp = get_pktpools_registry(i)) != NULL) {
			if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
				if (enable) {
					hnd_pktpool_heap_pkt_release(pktpool_osh, pktp, flag);
				} else {
					hnd_pktpool_heap_pkt_retrieve(pktp, flag);
				}
			}
		}
	}
}

/* Do memory allocation from pool heap memory */
void *
hnd_pktpool_freelist_alloc(uint size, uint alignbits, uint32 flag)
{
	int i = 0;
	pktpool_t *pktp;
	void *p = NULL;
	for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
		if ((pktp = get_pktpools_registry(i)) != NULL) {
			if ((flag == pktp->poolheap_flag) && pktp->is_heap_pool) {
				p = rte_freelist_mgr_alloc(size, alignbits, pktp->mem_handle);
				if (p)
					break;
			}
		}
	}
	return p;
}

/* Release pkts from pool to free heap */
static void
hnd_pktpool_heap_pkt_release(osl_t *osh, pktpool_t *pktp, uint32 flag)
{
	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;
	int i = 0;
	pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();

	pktp->release_active = FALSE;
	hnd_pktpool_release_active_reset(pktp);

	if (pktp->n_pkts <= pktp->min_backup_buf)
		return;
	/* call module specific callbacks */
	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If pool is shared rx frag pool, use call back fn to reclaim host address
		 * and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);
		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;
	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
		 * associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while (pktp->avail > pktp->min_backup_buf) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		if (cb != NULL) {
			if (cb(pktp, arg, p, REMOVE_RXCPLID, NULL)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		lb_set_nofree(p);
		total_pool_pktid_count++;
		PKTFREE(osh, p, pktp->istx); /* free the packet */

		rte_freelist_mgr_add(p, pktp->mem_handle);
		pktp->avail--;
		pktp->n_pkts--;
		pktp->poolheap_count++;
	}

	/* Execute call back for upper layer which used pkt from heap */
	for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
		if ((pktp_heap_cb[i].fn != NULL) &&
			(flag == pktp_heap_cb[i].flag))
			(pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, TRUE);
	}

}

static pktpool_heap_cb_reg_t *
BCMRAMFN(hnd_pool_get_cb_registry)(void)
{
	return pktpool_heap_cb_reg;
}

static void
BCMFASTPATH(hnd_pktpool_lbuf_free_cb)(uint8 poolid)
{
	int i = 0;
	pktpool_t *pktp;

	if (poolid == PKTPOOL_INVALID_ID && pktpool_heap_rel_active) {
		for (i = 1; i < PKTPOOL_MAXIMUM_ID; i++) {
			if ((pktp = get_pktpools_registry(i)) != NULL) {
				if (pktp->is_heap_pool && (pktp->release_active)) {
					rte_freelist_mgr_release(pktp->mem_handle);
				}
			}
		}
	}
}

/* Take back pkts from free mem and refill pool */
static void
hnd_pktpool_heap_pkt_retrieve(pktpool_t *pktp, uint32 flag)
{
	int i = 0;
	pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();
	pktp->release_active = TRUE;
	hnd_pktpool_release_active_set(pktp);

	/* Execute call back for upper layer which used pkt from heap */
	for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
		if ((pktp_heap_cb[i].fn != NULL) &&
			(flag == pktp_heap_cb[i].flag))
			(pktp_heap_cb[i].fn)(pktp_heap_cb[i].ctxt, FALSE);
	}

	rte_freelist_mgr_release(pktp->mem_handle);
}

/* Function to add back the pkt to pktpool */
static int
hnd_pktpool_heap_get_cb(uint8 handle, void *ctxt, void *pkt, uint pktsize)
{
	pktpool_t *pktp = (pktpool_t *)ctxt;
	struct lbuf *lb;
	int ret = BCME_ERROR;
	if (pktp != NULL) {
		if ((lb = PKTALLOC_ON_LOC(pktpool_osh, pktp->max_pkt_bytes,
			pktp->type, pkt, pktsize)) != NULL) {
			if ((ret = pktpool_add(pktp, lb)) == BCME_OK) {
				pktp->poolheap_count--;
				ASSERT(total_pool_pktid_count > 0);
				total_pool_pktid_count--;
				if (pktp->poolheap_count == 0) {
					pktp->release_active = FALSE;
					hnd_pktpool_release_active_reset(pktp);
				}
				if (pktp->cbcnt) {
					if (pktp->empty == FALSE)
						pktpool_avail_notify(pktp);
				}
			} else {
				/*
				 * pktpool_add failed, indicating the pool already
				 * holds the maximum number of pkts, so free this
				 * buffer back to the heap
				 */
				PKTFREE(pktpool_osh, lb, pktsize);
			}
			ret = BCME_OK;
		}
	}
	return ret;
}

int
hnd_pktpool_heap_register_cb(pktpool_heap_cb_t fn, void *ctxt, uint32 flag)
{
	int i = 0;
	int err = BCME_ERROR;
	pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();

	/* Search for free entry */
	for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
		if (pktp_heap_cb[i].fn == NULL)
			break;
	}

	if (i < PKTPOOL_MAX_HEAP_CB) {
		pktp_heap_cb[i].fn = fn;
		pktp_heap_cb[i].ctxt = ctxt;
		pktp_heap_cb[i].flag = flag;
		err = BCME_OK;
	}
	return err;
}
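
/*
 * Sketch ('my_heap_cb'/'my_ctx' are hypothetical): a module that borrows
 * pool memory via hnd_pktpool_freelist_alloc() registers here so it is
 * notified when a pool releases pkts to the heap (enable == TRUE) and when
 * the pool wants them back (enable == FALSE):
 *
 *	hnd_pktpool_heap_register_cb(my_heap_cb, my_ctx, POOL_HEAP_FLAG_D3);
 *	...
 *	hnd_pktpool_heap_deregister_cb(my_heap_cb);
 */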

int
hnd_pktpool_heap_deregister_cb(pktpool_heap_cb_t fn)
{
	int i = 0;
	int err = BCME_ERROR;
	pktpool_heap_cb_reg_t *pktp_heap_cb = hnd_pool_get_cb_registry();

	/* Search for matching entry */
	for (i = 0; i < PKTPOOL_MAX_HEAP_CB; i++) {
		if (pktp_heap_cb[i].fn == fn)
			break;
	}

	if (i < PKTPOOL_MAX_HEAP_CB) {
		pktp_heap_cb[i].fn = NULL;
		err = BCME_OK;
	}
	return err;
}

uint16
hnd_pktpool_get_min_bkup_buf(pktpool_t *pktp)
{
	return pktp->min_backup_buf;
}
#endif /* POOL_HEAP_RECONFIG */

uint32
hnd_pktpool_get_total_poolheap_count(void)
{
	return total_pool_pktid_count;
}
#endif /* BCMPKTPOOL */