1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * HND generic packet pool operation primitives
4 *
5 * Copyright (C) 1999-2019, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $
29 */
30
31 #include <typedefs.h>
32 #include <osl.h>
33 #include <osl_ext.h>
34 #include <bcmutils.h>
35 #include <hnd_pktpool.h>
36 #ifdef BCMRESVFRAGPOOL
37 #include <hnd_resvpool.h>
38 #endif /* BCMRESVFRAGPOOL */
39 #ifdef BCMFRWDPOOLREORG
40 #include <hnd_poolreorg.h>
41 #endif /* BCMFRWDPOOLREORG */
42
43 /* mutex macros for thread safe */
44 #ifdef HND_PKTPOOL_THREAD_SAFE
45 #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
46 #define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
47 #define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) osl_ext_mutex_acquire(mutex, msec)
48 #define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
49 #else
50 #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
51 #define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
52 #define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
53 #define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
54 #endif // endif
55
56 /* Registry size is one larger than max pools, as slot #0 is reserved */
57 #define PKTPOOLREG_RSVD_ID (0U)
58 #define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
59 #define PKTPOOLREG_FREE_PTR (POOLPTR(NULL))
60
61 #define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp)))
62 #define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp)))
63
64 /* Tag a registry entry as free for use */
65 #define PKTPOOL_REGISTRY_CLR(id) \
66 PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
67 #define PKTPOOL_REGISTRY_ISCLR(id) \
68 (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
69
70 /* Tag registry entry 0 as reserved */
71 #define PKTPOOL_REGISTRY_RSV() \
72 PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
73 #define PKTPOOL_REGISTRY_ISRSVD() \
74 (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
75
76 /* Walk all un-reserved entries in registry */
77 #define PKTPOOL_REGISTRY_FOREACH(id) \
78 for ((id) = 1U; (id) <= pktpools_max; (id)++)
79
/* Dispatch state of the 'packet returned to pool' callback; toggled by
 * pktpool_emptycb_disable() and consumed in pktpool_free().
 */
enum pktpool_empty_cb_state {
	EMPTYCB_ENABLED = 0,	/* Enable callback when new packets are added to pool */
	EMPTYCB_DISABLED,	/* Disable callback when new packets are added to pool */
	EMPTYCB_SKIPPED		/* Packet was added to pool when callback was disabled */
};

uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */

/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);

/** add declaration */
static void pktpool_avail_notify(pktpool_t *pktp);
95
96 /** accessor functions required when ROMming this file, forced into RAM */
97
/** Returns the pool registered in slot 'id' (forced into RAM for ROM builds) */
pktpool_t *
BCMRAMFN(get_pktpools_registry)(int id)
{
	return pktpools_registry[id];
}
103
/** Stores pool pointer 'pp' into registry slot 'id' */
static void
BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
	pktpools_registry[id] = pp;
}
109
/** TRUE when registry slot 'id' currently holds exactly the pointer 'pp' */
static bool
BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
	return pktpools_registry[id] == pp;
}
115
116 /** Constructs a pool registry to serve a maximum of total_pools */
117 int
pktpool_attach(osl_t * osh,uint32 total_pools)118 pktpool_attach(osl_t *osh, uint32 total_pools)
119 {
120 uint32 poolid;
121 BCM_REFERENCE(osh);
122
123 if (pktpools_max != 0U) {
124 return BCME_ERROR;
125 }
126
127 ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
128
129 /* Initialize registry: reserve slot#0 and tag others as free */
130 PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */
131
132 PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
133 PKTPOOL_REGISTRY_CLR(poolid);
134 }
135
136 pktpools_max = total_pools;
137
138 return (int)pktpools_max;
139 }
140
141 /** Destructs the pool registry. Ascertain all pools were first de-inited */
142 int
pktpool_dettach(osl_t * osh)143 pktpool_dettach(osl_t *osh)
144 {
145 uint32 poolid;
146 BCM_REFERENCE(osh);
147
148 if (pktpools_max == 0U) {
149 return BCME_OK;
150 }
151
152 /* Ascertain that no pools are still registered */
153 ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
154
155 PKTPOOL_REGISTRY_FOREACH(poolid) { /* ascertain all others are free */
156 ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
157 }
158
159 pktpools_max = 0U; /* restore boot state */
160
161 return BCME_OK;
162 }
163
164 /** Registers a pool in a free slot; returns the registry slot index */
165 static int
pktpool_register(pktpool_t * poolptr)166 pktpool_register(pktpool_t * poolptr)
167 {
168 uint32 poolid;
169
170 if (pktpools_max == 0U) {
171 return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
172 }
173
174 ASSERT(pktpools_max != 0U);
175
176 /* find an empty slot in pktpools_registry */
177 PKTPOOL_REGISTRY_FOREACH(poolid) {
178 if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
179 PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
180 return (int)poolid; /* return pool ID */
181 }
182 } /* FOREACH */
183
184 return PKTPOOL_INVALID_ID; /* error: registry is full */
185 }
186
/** Deregisters a pktpool, given the pool pointer; tag slot as free */
static int
pktpool_deregister(pktpool_t * poolptr)
{
	uint32 poolid;

	ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));

	/* the pool's own id selects the registry slot to release */
	poolid = POOLID(poolptr);
	ASSERT(poolid <= pktpools_max);

	/* Ascertain that a previously registered poolptr is being de-registered */
	if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
		PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
	} else {
		ASSERT(0);
		return BCME_ERROR; /* mismatch in registry */
	}

	return BCME_OK;
}
208
209 /**
210 * pktpool_init:
211 * User provides a pktpool_t structure and specifies the number of packets to
212 * be pre-filled into the pool (n_pkts).
213 * pktpool_init first attempts to register the pool and fetch a unique poolid.
214 * If registration fails, it is considered an BCME_ERR, caused by either the
215 * registry was not pre-created (pktpool_attach) or the registry is full.
216 * If registration succeeds, then the requested number of packets will be filled
217 * into the pool as part of initialization. In the event that there is no
218 * available memory to service the request, then BCME_NOMEM will be returned
219 * along with the count of how many packets were successfully allocated.
220 * In dongle builds, prior to memory reclaimation, one should limit the number
221 * of packets to be allocated during pktpool_init and fill the pool up after
222 * reclaim stage.
223 *
224 * @param n_pkts Number of packets to be pre-filled into the pool
225 * @param max_pkt_bytes The size of all packets in a pool must be the same. E.g. PKTBUFSZ.
226 * @param type e.g. 'lbuf_frag'
227 */
228 int
pktpool_init(osl_t * osh,pktpool_t * pktp,int * n_pkts,int max_pkt_bytes,bool istx,uint8 type)229 pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes, bool istx,
230 uint8 type)
231 {
232 int i, err = BCME_OK;
233 int pktplen;
234 uint8 pktp_id;
235
236 ASSERT(pktp != NULL);
237 ASSERT(osh != NULL);
238 ASSERT(n_pkts != NULL);
239
240 pktplen = *n_pkts;
241
242 bzero(pktp, sizeof(pktpool_t));
243
244 /* assign a unique pktpool id */
245 if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
246 return BCME_ERROR;
247 }
248 POOLSETID(pktp, pktp_id);
249
250 pktp->inited = TRUE;
251 pktp->istx = istx ? TRUE : FALSE;
252 pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
253 pktp->type = type;
254
255 if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
256 return BCME_ERROR;
257 }
258
259 pktp->maxlen = PKTPOOL_LEN_MAX;
260 pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
261
262 for (i = 0; i < pktplen; i++) {
263 void *p;
264 p = PKTGET(osh, max_pkt_bytes, TRUE);
265
266 if (p == NULL) {
267 /* Not able to allocate all requested pkts
268 * so just return what was actually allocated
269 * We can add to the pool later
270 */
271 if (pktp->freelist == NULL) /* pktpool free list is empty */
272 err = BCME_NOMEM;
273
274 goto exit;
275 }
276
277 PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
278
279 PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
280 pktp->freelist = p;
281
282 pktp->avail++;
283
284 #ifdef BCMDBG_POOL
285 pktp->dbg_q[pktp->dbg_qlen++].p = p;
286 #endif // endif
287 }
288
289 exit:
290 pktp->n_pkts = pktp->avail;
291
292 *n_pkts = pktp->n_pkts; /* number of packets managed by pool */
293 return err;
294 } /* pktpool_init */
295
/**
 * pktpool_deinit:
 * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
 * An assert is in place to ensure that there are no packets still lingering
 * around. Packets freed to a pool after the deinit will cause a memory
 * corruption as the pktpool_t structure no longer exists.
 */
int
pktpool_deinit(osl_t *osh, pktpool_t *pktp)
{
	uint16 freed = 0;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
	{
		/* drop all debug-queue references before tearing the pool down */
		int i;
		for (i = 0; i <= pktp->n_pkts; i++) {
			pktp->dbg_q[i].p = NULL;
		}
	}
#endif // endif

	/* walk the free list, untagging and heap-freeing every packet */
	while (pktp->freelist != NULL) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		ASSERT(freed <= pktp->n_pkts);
	}

	pktp->avail -= freed;
	ASSERT(pktp->avail == 0); /* every pool pkt must have been returned first */

	pktp->n_pkts -= freed;

	pktpool_deregister(pktp); /* release previously acquired unique pool id */
	POOLSETID(pktp, PKTPOOL_INVALID_ID);

	if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	pktp->inited = FALSE;

	/* Are there still pending pkts? */
	ASSERT(pktp->n_pkts == 0);

	return 0;
}
353
354 int
pktpool_fill(osl_t * osh,pktpool_t * pktp,bool minimal)355 pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
356 {
357 void *p;
358 int err = 0;
359 int n_pkts, psize, maxlen;
360
361 /* protect shared resource */
362 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
363 return BCME_ERROR;
364
365 ASSERT(pktp->max_pkt_bytes != 0);
366
367 maxlen = pktp->maxlen;
368 psize = minimal ? (maxlen >> 2) : maxlen;
369 for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) {
370
371 p = PKTGET(osh, pktp->n_pkts, TRUE);
372
373 if (p == NULL) {
374 err = BCME_NOMEM;
375 break;
376 }
377
378 if (pktpool_add(pktp, p) != BCME_OK) {
379 PKTFREE(osh, p, FALSE);
380 err = BCME_ERROR;
381 break;
382 }
383 }
384
385 /* protect shared resource */
386 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
387 return BCME_ERROR;
388
389 if (pktp->cbcnt) {
390 if (pktp->empty == FALSE)
391 pktpool_avail_notify(pktp);
392 }
393
394 return err;
395 }
396
397 #ifdef BCMPOOLRECLAIM
/* New API to decrease the pkts from pool, but not deinit
 */
uint16
pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt)
{
	uint16 freed = 0;

	pktpool_cb_extn_t cb = NULL;
	void *arg = NULL;

	ASSERT(osh != NULL);
	ASSERT(pktp != NULL);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return freed;
	}

	/* cannot release more packets than currently sit on the free list */
	if (pktp->avail < free_cnt) {
		free_cnt = pktp->avail;
	}

	if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
		/* If pool is shared rx frag pool, use call back fn to reclaim host address
		 * and Rx cpl ID associated with the pkt.
		 */
		ASSERT(pktp->cbext.cb != NULL);

		cb = pktp->cbext.cb;
		arg = pktp->cbext.arg;

	} else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
		/* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
		 * associated with the pkt.
		 */
		cb = pktp->rxcplidfn.cb;
		arg = pktp->rxcplidfn.arg;
	}

	while ((pktp->freelist != NULL) && (free_cnt)) {
		void * p = pktp->freelist;

		pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
		PKTSETFREELIST(p, NULL);

		if (cb != NULL) {
			/* non-zero presumably means the pkt's host resources
			 * could not be released: re-insert at the head and stop
			 * reclaiming -- TODO confirm cb contract
			 */
			if (cb(pktp, arg, p, REMOVE_RXCPLID)) {
				PKTSETFREELIST(p, pktp->freelist);
				pktp->freelist = p;
				break;
			}
		}

		PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

		PKTFREE(osh, p, pktp->istx); /* free the packet */

		freed++;
		free_cnt--;
	}

	/* account for the packets just returned to the heap */
	pktp->avail -= freed;

	pktp->n_pkts -= freed;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return freed;
	}

	return freed;
}
470 #endif /* #ifdef BCMPOOLRECLAIM */
471
472 /* New API to empty the pkts from pool, but not deinit
473 * NOTE: caller is responsible to ensure,
474 * all pkts are available in pool for free; else LEAK !
475 */
476 int
pktpool_empty(osl_t * osh,pktpool_t * pktp)477 pktpool_empty(osl_t *osh, pktpool_t *pktp)
478 {
479 uint16 freed = 0;
480
481 ASSERT(osh != NULL);
482 ASSERT(pktp != NULL);
483
484 /* protect shared resource */
485 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
486 return BCME_ERROR;
487
488 #ifdef BCMDBG_POOL
489 {
490 int i;
491 for (i = 0; i <= pktp->n_pkts; i++) {
492 pktp->dbg_q[i].p = NULL;
493 }
494 }
495 #endif // endif
496
497 while (pktp->freelist != NULL) {
498 void * p = pktp->freelist;
499
500 pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
501 PKTSETFREELIST(p, NULL);
502
503 PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
504
505 PKTFREE(osh, p, pktp->istx); /* free the packet */
506
507 freed++;
508 ASSERT(freed <= pktp->n_pkts);
509 }
510
511 pktp->avail -= freed;
512 ASSERT(pktp->avail == 0);
513
514 pktp->n_pkts -= freed;
515
516 ASSERT(pktp->n_pkts == 0);
517
518 /* protect shared resource */
519 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
520 return BCME_ERROR;
521
522 return 0;
523 }
524
525 static void *
pktpool_deq(pktpool_t * pktp)526 pktpool_deq(pktpool_t *pktp)
527 {
528 void *p = NULL;
529
530 if (pktp->avail == 0)
531 return NULL;
532
533 ASSERT(pktp->freelist != NULL);
534
535 p = pktp->freelist; /* dequeue packet from head of pktpool free list */
536 pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
537
538 PKTSETFREELIST(p, NULL);
539
540 pktp->avail--;
541
542 return p;
543 }
544
545 static void
pktpool_enq(pktpool_t * pktp,void * p)546 pktpool_enq(pktpool_t *pktp, void *p)
547 {
548 ASSERT(p != NULL);
549
550 PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
551 pktp->freelist = p; /* free list points to newly inserted packet */
552
553 pktp->avail++;
554 ASSERT(pktp->avail <= pktp->n_pkts);
555 }
556
557 /** utility for registering host addr fill function called from pciedev */
558 int
559 /* BCMATTACHFN */
560 (pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
561 {
562
563 ASSERT(cb != NULL);
564
565 ASSERT(pktp->cbext.cb == NULL);
566 pktp->cbext.cb = cb;
567 pktp->cbext.arg = arg;
568 return 0;
569 }
570
/** Registers the callback that releases the Rx cpl ID tied to a packet */
int
pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
{

	ASSERT(cb != NULL);

	if (pktp == NULL)
		return BCME_ERROR;
	ASSERT(pktp->rxcplidfn.cb == NULL); /* only a single registrant is allowed */
	pktp->rxcplidfn.cb = cb;
	pktp->rxcplidfn.arg = arg;
	return 0;
}
584
/** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
void
pktpool_invoke_dmarxfill(pktpool_t *pktp)
{
	ASSERT(pktp->dmarxfill.cb);
	ASSERT(pktp->dmarxfill.arg);

	/* guarded anyway so a release build with no registrant does not crash */
	if (pktp->dmarxfill.cb)
		pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
}
595
/** Registers callback functions for split rx mode */
int
pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{

	ASSERT(cb != NULL);

	/* note: unconditionally overwrites any previously registered pair */
	pktp->dmarxfill.cb = cb;
	pktp->dmarxfill.arg = arg;

	return 0;
}
608
609 /**
610 * Registers callback functions.
611 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
612 */
613 int
pktpool_avail_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)614 pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
615 {
616 int err = 0;
617 int i;
618
619 /* protect shared resource */
620 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
621 return BCME_ERROR;
622
623 ASSERT(cb != NULL);
624
625 for (i = 0; i < pktp->cbcnt; i++) {
626 ASSERT(pktp->cbs[i].cb != NULL);
627 if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
628 pktp->cbs[i].refcnt++;
629 goto done;
630 }
631 }
632
633 i = pktp->cbcnt;
634 if (i == PKTPOOL_CB_MAX_AVL) {
635 err = BCME_ERROR;
636 goto done;
637 }
638
639 ASSERT(pktp->cbs[i].cb == NULL);
640 pktp->cbs[i].cb = cb;
641 pktp->cbs[i].arg = arg;
642 pktp->cbs[i].refcnt++;
643 pktp->cbcnt++;
644
645 done:
646 /* protect shared resource */
647 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
648 return BCME_ERROR;
649
650 return err;
651 }
652
/* No BCMATTACHFN as it is used in a non-attach function */
int
pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i, k;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	ASSERT(cb != NULL);

	/* locate the (cb, arg) pair; silently a no-op when it is not found */
	for (i = 0; i < pktp->cbcnt; i++) {
		ASSERT(pktp->cbs[i].cb != NULL);
		if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
			pktp->cbs[i].refcnt--;
			if (pktp->cbs[i].refcnt) {
				/* Still there are references to this callback */
				goto done;
			}
			/* Moving any more callbacks to fill the hole */
			for (k = i+1; k < pktp->cbcnt; i++, k++) {
				pktp->cbs[i].cb = pktp->cbs[k].cb;
				pktp->cbs[i].arg = pktp->cbs[k].arg;
				pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
			}

			/* reset the last callback (i now indexes the stale tail slot) */
			pktp->cbs[i].cb = NULL;
			pktp->cbs[i].arg = NULL;
			pktp->cbs[i].refcnt = 0;

			pktp->cbcnt--;
			goto done;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
		return BCME_ERROR;
	}

	return err;
}
700
701 /** Registers callback functions */
702 int
pktpool_empty_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)703 pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
704 {
705 int err = 0;
706 int i;
707
708 /* protect shared resource */
709 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
710 return BCME_ERROR;
711
712 ASSERT(cb != NULL);
713
714 i = pktp->ecbcnt;
715 if (i == PKTPOOL_CB_MAX) {
716 err = BCME_ERROR;
717 goto done;
718 }
719
720 ASSERT(pktp->ecbs[i].cb == NULL);
721 pktp->ecbs[i].cb = cb;
722 pktp->ecbs[i].arg = arg;
723 pktp->ecbcnt++;
724
725 done:
726 /* protect shared resource */
727 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
728 return BCME_ERROR;
729
730 return err;
731 }
732
733 /** Calls registered callback functions */
734 static int
pktpool_empty_notify(pktpool_t * pktp)735 pktpool_empty_notify(pktpool_t *pktp)
736 {
737 int i;
738
739 pktp->empty = TRUE;
740 for (i = 0; i < pktp->ecbcnt; i++) {
741 ASSERT(pktp->ecbs[i].cb != NULL);
742 pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
743 }
744 pktp->empty = FALSE;
745
746 return 0;
747 }
748
749 #ifdef BCMDBG_POOL
/** Registers a debug callback; invoked from pktpool_dbg_notify() */
int
pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
	int err = 0;
	int i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(cb);

	/* append to the debug-callback table, if room remains */
	i = pktp->dbg_cbcnt;
	if (i == PKTPOOL_CB_MAX) {
		err = BCME_ERROR;
		goto done;
	}

	ASSERT(pktp->dbg_cbs[i].cb == NULL);
	pktp->dbg_cbs[i].cb = cb;
	pktp->dbg_cbs[i].arg = arg;
	pktp->dbg_cbcnt++;

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
780
781 int pktpool_dbg_notify(pktpool_t *pktp);
782
783 int
pktpool_dbg_notify(pktpool_t * pktp)784 pktpool_dbg_notify(pktpool_t *pktp)
785 {
786 int i;
787
788 /* protect shared resource */
789 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
790 return BCME_ERROR;
791
792 for (i = 0; i < pktp->dbg_cbcnt; i++) {
793 ASSERT(pktp->dbg_cbs[i].cb);
794 pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
795 }
796
797 /* protect shared resource */
798 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
799 return BCME_ERROR;
800
801 return 0;
802 }
803
804 int
pktpool_dbg_dump(pktpool_t * pktp)805 pktpool_dbg_dump(pktpool_t *pktp)
806 {
807 int i;
808
809 /* protect shared resource */
810 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
811 return BCME_ERROR;
812
813 printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
814 for (i = 0; i < pktp->dbg_qlen; i++) {
815 ASSERT(pktp->dbg_q[i].p);
816 printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
817 pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
818 }
819
820 /* protect shared resource */
821 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
822 return BCME_ERROR;
823
824 return 0;
825 }
826
827 int
pktpool_stats_dump(pktpool_t * pktp,pktpool_stats_t * stats)828 pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
829 {
830 int i;
831 int state;
832
833 /* protect shared resource */
834 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
835 return BCME_ERROR;
836
837 bzero(stats, sizeof(pktpool_stats_t));
838 for (i = 0; i < pktp->dbg_qlen; i++) {
839 ASSERT(pktp->dbg_q[i].p != NULL);
840
841 state = PKTPOOLSTATE(pktp->dbg_q[i].p);
842 switch (state) {
843 case POOL_TXENQ:
844 stats->enq++; break;
845 case POOL_TXDH:
846 stats->txdh++; break;
847 case POOL_TXD11:
848 stats->txd11++; break;
849 case POOL_RXDH:
850 stats->rxdh++; break;
851 case POOL_RXD11:
852 stats->rxd11++; break;
853 case POOL_RXFILL:
854 stats->rxfill++; break;
855 case POOL_IDLE:
856 stats->idle++; break;
857 }
858 }
859
860 /* protect shared resource */
861 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
862 return BCME_ERROR;
863
864 return 0;
865 }
866
867 int
pktpool_start_trigger(pktpool_t * pktp,void * p)868 pktpool_start_trigger(pktpool_t *pktp, void *p)
869 {
870 uint32 cycles, i;
871
872 /* protect shared resource */
873 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
874 return BCME_ERROR;
875
876 if (!PKTPOOL(OSH_NULL, p))
877 goto done;
878
879 OSL_GETCYCLES(cycles);
880
881 for (i = 0; i < pktp->dbg_qlen; i++) {
882 ASSERT(pktp->dbg_q[i].p != NULL);
883
884 if (pktp->dbg_q[i].p == p) {
885 pktp->dbg_q[i].cycles = cycles;
886 break;
887 }
888 }
889
890 done:
891 /* protect shared resource */
892 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
893 return BCME_ERROR;
894
895 return 0;
896 }
897
898 int pktpool_stop_trigger(pktpool_t *pktp, void *p);
899
/** Computes how long packet 'p' was outstanding since pktpool_start_trigger() */
int
pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
	uint32 cycles, i;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	/* only pool-tagged packets are tracked */
	if (!PKTPOOL(OSH_NULL, p))
		goto done;

	OSL_GETCYCLES(cycles);

	for (i = 0; i < pktp->dbg_qlen; i++) {
		ASSERT(pktp->dbg_q[i].p != NULL);

		if (pktp->dbg_q[i].p == p) {
			/* no matching start trigger was ever recorded */
			if (pktp->dbg_q[i].cycles == 0)
				break;

			if (cycles >= pktp->dbg_q[i].cycles)
				pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
			else
				/* cycle counter wrapped around in between */
				pktp->dbg_q[i].dur =
					(((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;

			pktp->dbg_q[i].cycles = 0; /* re-arm for the next trigger */
			break;
		}
	}

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
939 #endif /* BCMDBG_POOL */
940
/** Reverts to normal notification: all registrants get avail callbacks again */
int
pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
	BCM_REFERENCE(osh);
	ASSERT(pktp);

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	/* drop any exclusive-notify claim set by pktpool_avail_notify_exclusive() */
	pktp->availcb_excl = NULL;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return 0;
}
959
960 int
pktpool_avail_notify_exclusive(osl_t * osh,pktpool_t * pktp,pktpool_cb_t cb)961 pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
962 {
963 int i;
964 int err;
965 BCM_REFERENCE(osh);
966
967 ASSERT(pktp);
968
969 /* protect shared resource */
970 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
971 return BCME_ERROR;
972
973 ASSERT(pktp->availcb_excl == NULL);
974 for (i = 0; i < pktp->cbcnt; i++) {
975 if (cb == pktp->cbs[i].cb) {
976 pktp->availcb_excl = &pktp->cbs[i];
977 break;
978 }
979 }
980
981 if (pktp->availcb_excl == NULL)
982 err = BCME_ERROR;
983 else
984 err = 0;
985
986 /* protect shared resource */
987 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
988 return BCME_ERROR;
989
990 return err;
991 }
992
/**
 * Calls the registered 'packet available' callbacks, or only the exclusive
 * one when pktpool_avail_notify_exclusive() has claimed notification.
 * Alternates the dispatch direction on every call so no registrant is
 * systematically starved of freshly freed packets.
 */
static void
pktpool_avail_notify(pktpool_t *pktp)
{
	int i, k, idx;
	int avail;

	ASSERT(pktp);
	if (pktp->availcb_excl != NULL) {
		/* exclusive mode: every other registrant is bypassed */
		pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
		return;
	}

	k = pktp->cbcnt - 1;
	for (i = 0; i < pktp->cbcnt; i++) {
		/* re-sample: earlier callbacks may have drained the pool */
		avail = pktp->avail;

		if (avail) {
			if (pktp->cbtoggle)
				idx = i;	/* walk head -> tail */
			else
				idx = k--;	/* walk tail -> head */

			ASSERT(pktp->cbs[idx].cb != NULL);
			pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
		}
	}

	/* Alternate between filling from head or tail
	 */
	pktp->cbtoggle ^= 1;

	return;
}
1026
1027 /** Gets an empty packet from the caller provided pool */
1028 void *
pktpool_get(pktpool_t * pktp)1029 pktpool_get(pktpool_t *pktp)
1030 {
1031 void *p;
1032
1033 /* protect shared resource */
1034 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
1035 return NULL;
1036
1037 p = pktpool_deq(pktp);
1038
1039 if (p == NULL) {
1040 /* Notify and try to reclaim tx pkts */
1041 if (pktp->ecbcnt)
1042 pktpool_empty_notify(pktp);
1043
1044 p = pktpool_deq(pktp);
1045 if (p == NULL)
1046 goto done;
1047 }
1048
1049 done:
1050 /* protect shared resource */
1051 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
1052 return NULL;
1053
1054 return p;
1055 }
1056
/** Returns packet 'p' to its pool and, when enabled, notifies registrants */
void
pktpool_free(pktpool_t *pktp, void *p)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return;

	ASSERT(p != NULL);
#ifdef BCMDBG_POOL
	/* pktpool_stop_trigger(pktp, p); */
#endif // endif

	pktpool_enq(pktp, p);

	/**
	 * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
	 * If any avail callback functions are registered, send a notification
	 * that a new packet is available in the pool.
	 */
	if (pktp->cbcnt) {
		/* To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
		 * This allows to feed on burst basis as opposed to inefficient per-packet basis.
		 */
		if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
			/**
			 * If the call originated from pktpool_empty_notify, the just freed packet
			 * is needed in pktpool_get.
			 * Therefore don't call pktpool_avail_notify.
			 */
			if (pktp->empty == FALSE)
				pktpool_avail_notify(pktp);
		} else {
			/**
			 * The callback is temporarily disabled, log that a packet has been freed.
			 */
			pktp->emptycb_disable = EMPTYCB_SKIPPED;
		}
	}

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return;
}
1100
/** Adds a caller provided (empty) packet to the caller provided pool */
int
pktpool_add(pktpool_t *pktp, void *p)
{
	int err = 0;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	ASSERT(p != NULL);

	/* refuse when the pool is already at its configured depth */
	if (pktp->n_pkts == pktp->maxlen) {
		err = BCME_RANGE;
		goto done;
	}

	/* pkts in pool have same length */
	ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
	PKTSETPOOL(OSH_NULL, p, TRUE, pktp); /* tag the packet as pool-owned */

	pktp->n_pkts++;
	pktpool_enq(pktp, p);

#ifdef BCMDBG_POOL
	pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif // endif

done:
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return err;
}
1136
/**
 * Force pktpool_setmaxlen () into RAM as it uses a constant
 * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
 *
 * @return the effective pool depth after clamping, or BCME_ERROR.
 */
int
BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
{
	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	/* clamp the request to the compile-time ceiling */
	if (maxlen > PKTPOOL_LEN_MAX)
		maxlen = PKTPOOL_LEN_MAX;

	/* if pool is already beyond maxlen, then just cap it
	 * since we currently do not reduce the pool len
	 * already allocated
	 */
	pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;

	/* protect shared resource */
	if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
		return BCME_ERROR;

	return pktp->maxlen;
}
1163
1164 void
pktpool_emptycb_disable(pktpool_t * pktp,bool disable)1165 pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
1166 {
1167 ASSERT(pktp);
1168
1169 /**
1170 * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
1171 * If callback is going to be re-enabled, check if any packet got
1172 * freed and added back to the pool while callback was disabled.
1173 * When this is the case do the callback now, provided that callback functions
1174 * are registered and this call did not originate from pktpool_empty_notify.
1175 */
1176 if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
1177 (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
1178 pktpool_avail_notify(pktp);
1179 }
1180
1181 /* Enable or temporarily disable callback when packet becomes available. */
1182 pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
1183 }
1184
1185 bool
pktpool_emptycb_disabled(pktpool_t * pktp)1186 pktpool_emptycb_disabled(pktpool_t *pktp)
1187 {
1188 ASSERT(pktp);
1189 return pktp->emptycb_disable != EMPTYCB_ENABLED;
1190 }
1191
#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

/* Shared pool of basic lbuf packets; created in hnd_pktpool_init(). */
pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
/* Shared pool of lbuf_frag packets (presumably tx fragments -- name-based). */
pktpool_t *pktpool_shared_lfrag = NULL;
#ifdef BCMRESVFRAGPOOL
/* Reserved fragment pool; kept empty at init (n = 0) and only carved out of
 * reserved memory later by hnd_pktpool_refill().
 */
pktpool_t *pktpool_resv_lfrag = NULL;
struct resv_info *resv_pool_info = NULL;
#endif /* BCMRESVFRAGPOOL */
#endif /* BCMFRAGPOOL */

/* Shared pool of lbuf_rxfrag packets. NOTE(review): declared unconditionally,
 * unlike the tx fragment pools above -- presumably referenced elsewhere even
 * when BCMRXFRAGPOOL is not defined; confirm before making it conditional.
 */
pktpool_t *pktpool_shared_rxlfrag = NULL;

/* osh handle recorded by hnd_pktpool_init(), used by the fill/refill calls. */
static osl_t *pktpool_osh = NULL;
1208
1209 /**
1210 * Initializes several packet pools and allocates packets within those pools.
1211 */
1212 int
hnd_pktpool_init(osl_t * osh)1213 hnd_pktpool_init(osl_t *osh)
1214 {
1215 int err = BCME_OK;
1216 int n;
1217
1218 /* Construct a packet pool registry before initializing packet pools */
1219 n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
1220 if (n != PKTPOOL_MAXIMUM_ID) {
1221 ASSERT(0);
1222 err = BCME_ERROR;
1223 goto error0;
1224 }
1225
1226 pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
1227 if (pktpool_shared == NULL) {
1228 ASSERT(0);
1229 err = BCME_NOMEM;
1230 goto error1;
1231 }
1232
1233 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1234 pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
1235 if (pktpool_shared_lfrag == NULL) {
1236 ASSERT(0);
1237 err = BCME_NOMEM;
1238 goto error2;
1239 }
1240 #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1241 resv_pool_info = hnd_resv_pool_alloc(osh);
1242 if (resv_pool_info == NULL) {
1243 ASSERT(0);
1244 goto error2;
1245 }
1246 pktpool_resv_lfrag = resv_pool_info->pktp;
1247 if (pktpool_resv_lfrag == NULL) {
1248 ASSERT(0);
1249 goto error2;
1250 }
1251 #endif /* RESVFRAGPOOL */
1252 #endif /* FRAGPOOL */
1253
1254 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1255 pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
1256 if (pktpool_shared_rxlfrag == NULL) {
1257 ASSERT(0);
1258 err = BCME_NOMEM;
1259 goto error3;
1260 }
1261 #endif // endif
1262
1263 /*
1264 * At this early stage, there's not enough memory to allocate all
1265 * requested pkts in the shared pool. Need to add to the pool
1266 * after reclaim
1267 *
1268 * n = NRXBUFPOST + SDPCMD_RXBUFS;
1269 *
1270 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
1271 * registry is not initialized or the registry is depleted.
1272 *
1273 * A BCME_NOMEM error only indicates that the requested number of packets
1274 * were not filled into the pool.
1275 */
1276 n = 1;
1277 MALLOC_SET_NOPERSIST(osh); /* Ensure subsequent allocations are non-persist */
1278 if ((err = pktpool_init(osh, pktpool_shared,
1279 &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
1280 ASSERT(0);
1281 goto error4;
1282 }
1283 pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
1284
1285 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1286 n = 1;
1287 if ((err = pktpool_init(osh, pktpool_shared_lfrag,
1288 &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) {
1289 ASSERT(0);
1290 goto error5;
1291 }
1292 pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
1293 #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1294 n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
1295 if (pktpool_init(osh, pktpool_resv_lfrag,
1296 &n, PKTFRAGSZ, TRUE, lbuf_frag) == BCME_ERROR) {
1297 ASSERT(0);
1298 goto error5;
1299 }
1300 pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
1301 #endif /* RESVFRAGPOOL */
1302 #endif /* BCMFRAGPOOL */
1303 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1304 n = 1;
1305 if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
1306 &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) {
1307 ASSERT(0);
1308 goto error6;
1309 }
1310 pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
1311 #endif // endif
1312
1313 #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1314 /* Attach poolreorg module */
1315 if ((frwd_poolreorg_info = poolreorg_attach(osh,
1316 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1317 pktpool_shared_lfrag,
1318 #else
1319 NULL,
1320 #endif // endif
1321 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1322 pktpool_shared_rxlfrag,
1323 #else
1324 NULL,
1325 #endif // endif
1326 pktpool_shared)) == NULL) {
1327 ASSERT(0);
1328 goto error7;
1329 }
1330 #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1331
1332 pktpool_osh = osh;
1333 MALLOC_CLEAR_NOPERSIST(osh);
1334
1335 return BCME_OK;
1336
1337 #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1338 /* detach poolreorg module */
1339 poolreorg_detach(frwd_poolreorg_info);
1340 error7:
1341 #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1342
1343 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1344 pktpool_deinit(osh, pktpool_shared_rxlfrag);
1345 error6:
1346 #endif // endif
1347
1348 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1349 pktpool_deinit(osh, pktpool_shared_lfrag);
1350 error5:
1351 #endif // endif
1352
1353 #if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
1354 (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
1355 pktpool_deinit(osh, pktpool_shared);
1356 #endif // endif
1357
1358 error4:
1359 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1360 hnd_free(pktpool_shared_rxlfrag);
1361 pktpool_shared_rxlfrag = (pktpool_t *)NULL;
1362 error3:
1363 #endif /* BCMRXFRAGPOOL */
1364
1365 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1366 hnd_free(pktpool_shared_lfrag);
1367 pktpool_shared_lfrag = (pktpool_t *)NULL;
1368 error2:
1369 #endif /* BCMFRAGPOOL */
1370
1371 hnd_free(pktpool_shared);
1372 pktpool_shared = (pktpool_t *)NULL;
1373
1374 error1:
1375 pktpool_dettach(osh);
1376 error0:
1377 MALLOC_CLEAR_NOPERSIST(osh);
1378 return err;
1379 } /* hnd_pktpool_init */
1380
1381 /** is called at each 'wl up' */
1382 int
hnd_pktpool_fill(pktpool_t * pktpool,bool minimal)1383 hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
1384 {
1385 return (pktpool_fill(pktpool_osh, pktpool, minimal));
1386 }
1387
/** refills pktpools after reclaim, is called once */
void
hnd_pktpool_refill(bool minimal)
{
	if (POOL_ENAB(pktpool_shared)) {
#if defined(SRMEM)
		if (SRMEM_ENAB()) {
			/* Top up the shared pool from SR memory first, until the
			 * pool reaches its max length or PKTSRGET runs dry.
			 */
			int maxlen = pktpool_max_pkts(pktpool_shared);
			int n_pkts = pktpool_tot_pkts(pktpool_shared);

			for (; n_pkts < maxlen; n_pkts++) {
				void *p;
				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) == NULL)
					break;
				pktpool_add(pktpool_shared, p);
			}
		}
#endif /* SRMEM */
		/* Then fill the remainder from the regular heap. */
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
	/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */
	/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif /* BCMRXFRAGPOOL */
#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
	if (POOL_ENAB(pktpool_resv_lfrag)) {
		/* Carve the reserved pool out of reserved memory; size is
		 * per-packet payload plus (presumably) lbuf overhead,
		 * times the pool length -- confirm LBUFFRAGSZ semantics.
		 */
		int resv_size = (PKTFRAGSZ + LBUFFRAGSZ)*RESV_FRAG_POOL_LEN;
		hnd_resv_pool_init(resv_pool_info, resv_size);
		hnd_resv_pool_enable(resv_pool_info);
	}
#endif /* BCMRESVFRAGPOOL */
}
1428 #endif /* BCMPKTPOOL */
1429