1 /*
2 * HND generic packet pool operation primitives
3 *
4 * Copyright (C) 1999-2019, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions
16 * of the license of that module. An independent module is a module which is
17 * not derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 *
25 * <<Broadcom-WL-IPTag/Open:>>
26 *
27 * $Id: hnd_pktpool.c 677681 2017-01-04 09:10:30Z $
28 */
29
30 #include <typedefs.h>
31 #include <osl.h>
32 #include <osl_ext.h>
33 #include <bcmutils.h>
34 #include <hnd_pktpool.h>
35 #ifdef BCMRESVFRAGPOOL
36 #include <hnd_resvpool.h>
37 #endif /* BCMRESVFRAGPOOL */
38 #ifdef BCMFRWDPOOLREORG
39 #include <hnd_poolreorg.h>
40 #endif /* BCMFRWDPOOLREORG */
41
42 /* mutex macros for thread safe */
43 #ifdef HND_PKTPOOL_THREAD_SAFE
44 #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) osl_ext_mutex_create(name, mutex)
45 #define HND_PKTPOOL_MUTEX_DELETE(mutex) osl_ext_mutex_delete(mutex)
46 #define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) \
47 osl_ext_mutex_acquire(mutex, msec)
48 #define HND_PKTPOOL_MUTEX_RELEASE(mutex) osl_ext_mutex_release(mutex)
49 #else
50 #define HND_PKTPOOL_MUTEX_CREATE(name, mutex) OSL_EXT_SUCCESS
51 #define HND_PKTPOOL_MUTEX_DELETE(mutex) OSL_EXT_SUCCESS
52 #define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec) OSL_EXT_SUCCESS
53 #define HND_PKTPOOL_MUTEX_RELEASE(mutex) OSL_EXT_SUCCESS
54 #endif // endif
55
56 /* Registry size is one larger than max pools, as slot #0 is reserved */
57 #define PKTPOOLREG_RSVD_ID (0U)
58 #define PKTPOOLREG_RSVD_PTR (POOLPTR(0xdeaddead))
59 #define PKTPOOLREG_FREE_PTR (POOLPTR(NULL))
60
61 #define PKTPOOL_REGISTRY_SET(id, pp) (pktpool_registry_set((id), (pp)))
62 #define PKTPOOL_REGISTRY_CMP(id, pp) (pktpool_registry_cmp((id), (pp)))
63
64 /* Tag a registry entry as free for use */
65 #define PKTPOOL_REGISTRY_CLR(id) PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
66 #define PKTPOOL_REGISTRY_ISCLR(id) \
67 (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))
68
69 /* Tag registry entry 0 as reserved */
70 #define PKTPOOL_REGISTRY_RSV() \
71 PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
72 #define PKTPOOL_REGISTRY_ISRSVD() \
73 (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))
74
75 /* Walk all un-reserved entries in registry */
76 #define PKTPOOL_REGISTRY_FOREACH(id) \
77 for ((id) = 1U; (id) <= pktpools_max; (id)++)
78
/* Tracks whether the "packet available" callback may fire from
 * pktpool_free(); toggled via pktpool_emptycb_disable().
 */
enum pktpool_empty_cb_state {
  EMPTYCB_ENABLED = 0, /* Enable callback when new packets are added to pool */
  EMPTYCB_DISABLED, /* Disable callback when new packets are added to pool */
  EMPTYCB_SKIPPED /* Packet was added to pool when callback was disabled */
};
85
uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
/* Pktpool registry: slot #0 is reserved (see PKTPOOLREG_RSVD_ID), usable
 * pool ids run 1..pktpools_max.
 */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry */
88
89 /* Register/Deregister a pktpool with registry during pktpool_init/deinit */
90 static int pktpool_register(pktpool_t *poolptr);
91 static int pktpool_deregister(pktpool_t *poolptr);
92
93 /** add declaration */
94 static void pktpool_avail_notify(pktpool_t *pktp);
95
96 /** accessor functions required when ROMming this file, forced into RAM */
97
/** Returns the pool registered at registry slot 'id' (BCMRAMFN: forced
 *  into RAM so ROM builds read the RAM-resident registry).
 */
pktpool_t *BCMRAMFN(get_pktpools_registry)(int id)
{
  return pktpools_registry[id];
}
102
/** Stores 'pp' into registry slot 'id'; no bounds check — callers pass
 *  validated ids.
 */
static void BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
{
  pktpools_registry[id] = pp;
}
107
/** TRUE iff registry slot 'id' currently holds exactly 'pp'. */
static bool BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
{
  return pktpools_registry[id] == pp;
}
112
113 /** Constructs a pool registry to serve a maximum of total_pools */
/**
 * Constructs the pool registry to serve a maximum of total_pools.
 * May only be called once (returns BCME_ERROR if already attached).
 * Returns the number of pools served on success.
 */
int pktpool_attach(osl_t *osh, uint32 total_pools)
{
  uint32 poolid;
  BCM_REFERENCE(osh);

  if (pktpools_max != 0U) {
    return BCME_ERROR; /* registry already constructed */
  }

  ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);

  /* BUGFIX: publish the registry size before walking it.
   * PKTPOOL_REGISTRY_FOREACH iterates 1..pktpools_max; previously
   * pktpools_max was assigned only after the loop, so the clearing
   * pass below never executed.
   */
  pktpools_max = total_pools;

  /* Initialize registry: reserve slot#0 and tag others as free */
  PKTPOOL_REGISTRY_RSV(); /* reserve slot#0 */

  PKTPOOL_REGISTRY_FOREACH(poolid) { /* tag all unreserved entries as free */
    PKTPOOL_REGISTRY_CLR(poolid);
  }

  return (int)pktpools_max;
}
136
137 /** Destructs the pool registry. Ascertain all pools were first de-inited */
/** Destructs the pool registry. All pools must already be de-inited. */
int pktpool_dettach(osl_t *osh)
{
  uint32 slot;
  BCM_REFERENCE(osh);

  if (pktpools_max == 0U) {
    return BCME_OK; /* nothing was ever attached */
  }

  /* Slot #0 must still carry the reserved tag... */
  ASSERT(PKTPOOL_REGISTRY_ISRSVD());

  /* ...and every other slot must have been released via pktpool_deinit */
  PKTPOOL_REGISTRY_FOREACH(slot) {
    ASSERT(PKTPOOL_REGISTRY_ISCLR(slot));
  }

  pktpools_max = 0U; /* back to the pre-attach state */
  return BCME_OK;
}
158
159 /** Registers a pool in a free slot; returns the registry slot index */
/**
 * Registers a pool in the first free registry slot.
 * @return the slot index (pool id) on success, PKTPOOL_INVALID_ID when
 *         the registry was never constructed or is full.
 */
static int pktpool_register(pktpool_t *poolptr)
{
  uint32 poolid;

  /* Registry must have been constructed via pktpool_attach() first.
   * (Dead `ASSERT(pktpools_max != 0U)` that followed this guard removed:
   * it could never fire.)
   */
  if (pktpools_max == 0U) {
    return PKTPOOL_INVALID_ID;
  }

  /* find an empty slot in pktpools_registry */
  PKTPOOL_REGISTRY_FOREACH(poolid) {
    if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
      PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
      return (int)poolid; /* return pool ID */
    }
  }

  return PKTPOOL_INVALID_ID; /* error: registry is full */
}
180
181 /** Deregisters a pktpool, given the pool pointer; tag slot as free */
/** Releases a pool's registry slot, given the pool pointer. */
static int pktpool_deregister(pktpool_t *poolptr)
{
  uint32 poolid;

  ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));

  poolid = POOLID(poolptr);
  ASSERT(poolid <= pktpools_max);

  /* Only a pointer that matches its registry slot may be released */
  if (!PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
    ASSERT(0);
    return BCME_ERROR; /* mismatch in registry */
  }

  PKTPOOL_REGISTRY_CLR(poolid); /* tag the slot as free again */
  return BCME_OK;
}
201
202 /**
203 * pktpool_init:
204 * User provides a pktpool_t structure and specifies the number of packets to
205 * be pre-filled into the pool (n_pkts).
206 * pktpool_init first attempts to register the pool and fetch a unique poolid.
207 * If registration fails, it is considered an BCME_ERR, caused by either the
208 * registry was not pre-created (pktpool_attach) or the registry is full.
209 * If registration succeeds, then the requested number of packets will be filled
210 * into the pool as part of initialization. In the event that there is no
211 * available memory to service the request, then BCME_NOMEM will be returned
212 * along with the count of how many packets were successfully allocated.
213 * In dongle builds, prior to memory reclaimation, one should limit the number
214 * of packets to be allocated during pktpool_init and fill the pool up after
215 * reclaim stage.
216 *
217 * @param n_pkts Number of packets to be pre-filled into the pool
218 * @param max_pkt_bytes The size of all packets in a pool must be the same.
219 * E.g. PKTBUFSZ.
220 * @param type e.g. 'lbuf_frag'
221 */
/**
 * Initializes a caller-provided pktpool_t: registers it, then pre-fills
 * it with *n_pkts packets of max_pkt_bytes each (see block comment above).
 *
 * @param n_pkts  in: requested packet count; out: packets actually pooled
 * @param max_pkt_bytes  size of every packet in the pool (e.g. PKTBUFSZ)
 * @param istx    TRUE for a tx pool
 * @param type    packet type, e.g. 'lbuf_frag'
 * @return BCME_OK, BCME_NOMEM (partial/empty fill), or BCME_ERROR
 */
int pktpool_init(osl_t *osh, pktpool_t *pktp, int *n_pkts, int max_pkt_bytes,
                 bool istx, uint8 type)
{
  int i, err = BCME_OK;
  int pktplen;
  uint8 pktp_id;

  ASSERT(pktp != NULL);
  ASSERT(osh != NULL);
  ASSERT(n_pkts != NULL);

  pktplen = *n_pkts;

  bzero(pktp, sizeof(pktpool_t));

  /* assign a unique pktpool id */
  if ((pktp_id = (uint8)pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
    return BCME_ERROR;
  }
  POOLSETID(pktp, pktp_id);

  pktp->inited = TRUE;
  pktp->istx = istx ? TRUE : FALSE;
  pktp->max_pkt_bytes = (uint16)max_pkt_bytes;
  pktp->type = type;

  if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
    /* BUGFIX: release the registry slot acquired above. Previously a
     * mutex-create failure leaked the pool id (permanently consuming a
     * registry entry) and left the pool marked inited.
     */
    pktpool_deregister(pktp);
    POOLSETID(pktp, PKTPOOL_INVALID_ID);
    pktp->inited = FALSE;
    return BCME_ERROR;
  }

  pktp->maxlen = PKTPOOL_LEN_MAX;
  pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);

  /* Pre-fill the pool; stop early (without failing hard) on alloc failure */
  for (i = 0; i < pktplen; i++) {
    void *p;
    p = PKTGET(osh, max_pkt_bytes, TRUE);
    if (p == NULL) {
      /* Not able to allocate all requested pkts
       * so just return what was actually allocated
       * We can add to the pool later
       */
      if (pktp->freelist == NULL) { /* pktpool free list is empty */
        err = BCME_NOMEM;
      }

      goto exit;
    }

    PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */

    PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
    pktp->freelist = p;

    pktp->avail++;

#ifdef BCMDBG_POOL
    pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif // endif
  }

exit:
  pktp->n_pkts = pktp->avail;

  *n_pkts = pktp->n_pkts; /* number of packets managed by pool */
  return err;
} /* pktpool_init */
288
289 /**
290 * pktpool_deinit:
291 * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
292 * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
293 * An assert is in place to ensure that there are no packets still lingering
294 * around. Packets freed to a pool after the deinit will cause a memory
295 * corruption as the pktpool_t structure no longer exists.
296 */
int pktpool_deinit(osl_t *osh, pktpool_t *pktp)
{
  uint16 freed = 0;

  ASSERT(osh != NULL);
  ASSERT(pktp != NULL);

#ifdef BCMDBG_POOL
  {
    int i;
    /* Drop debug-queue references. NOTE(review): loop bound is
     * `<= n_pkts`, which touches one entry past n_pkts — confirm dbg_q
     * is sized to allow this.
     */
    for (i = 0; i <= pktp->n_pkts; i++) {
      pktp->dbg_q[i].p = NULL;
    }
  }
#endif // endif

  /* Pop every packet off the free list and return it to the heap */
  while (pktp->freelist != NULL) {
    void *p = pktp->freelist;

    pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
    PKTSETFREELIST(p, NULL);

    PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

    PKTFREE(osh, p, pktp->istx); /* free the packet */

    freed++;
    ASSERT(freed <= pktp->n_pkts);
  }

  /* Both counters must drain to zero; anything else means packets are
   * still checked out of the pool (see block comment above).
   */
  pktp->avail -= freed;
  ASSERT(pktp->avail == 0);

  pktp->n_pkts -= freed;

  pktpool_deregister(pktp); /* release previously acquired unique pool id */
  POOLSETID(pktp, PKTPOOL_INVALID_ID);

  /* Tear down the mutex created in pktpool_init() */
  if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  pktp->inited = FALSE;

  /* Are there still pending pkts? */
  ASSERT(pktp->n_pkts == 0);

  return 0;
}
346
/**
 * Tops the pool up with freshly allocated packets.
 *
 * @param minimal  when TRUE, fill only up to a quarter of maxlen
 * @return BCME_OK, BCME_NOMEM on allocation failure, BCME_ERROR otherwise
 */
int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
{
  void *p;
  int err = 0;
  int n_pkts, psize, maxlen;

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(pktp->max_pkt_bytes != 0);

  maxlen = pktp->maxlen;
  psize = minimal ? (maxlen >> 2) : maxlen;
  for (n_pkts = (int)pktp->n_pkts; n_pkts < psize; n_pkts++) {
    /* BUGFIX: allocate packets of the pool's configured packet size.
     * The size argument was previously pktp->n_pkts (the packet COUNT),
     * yielding arbitrarily-sized buffers.
     */
    p = PKTGET(osh, pktp->max_pkt_bytes, TRUE);
    if (p == NULL) {
      err = BCME_NOMEM;
      break;
    }

    if (pktpool_add(pktp, p) != BCME_OK) {
      PKTFREE(osh, p, FALSE);
      err = BCME_ERROR;
      break;
    }
  }

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  /* Let registered consumers know packets may have become available,
   * unless pktpool_empty_notify() is currently draining the pool.
   */
  if (pktp->cbcnt) {
    if (pktp->empty == FALSE) {
      pktpool_avail_notify(pktp);
    }
  }

  return err;
}
390
391 #ifdef BCMPOOLRECLAIM
392 /* New API to decrease the pkts from pool, but not deinit
393 */
/**
 * Frees up to free_cnt packets from the pool back to the heap without
 * de-initializing it. Returns the number actually freed.
 */
uint16 pktpool_reclaim(osl_t *osh, pktpool_t *pktp, uint16 free_cnt)
{
  uint16 freed = 0;

  pktpool_cb_extn_t cb = NULL;
  void *arg = NULL;

  ASSERT(osh != NULL);
  ASSERT(pktp != NULL);

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return freed;
  }

  /* Cannot free more than the pool currently holds */
  if (pktp->avail < free_cnt) {
    free_cnt = pktp->avail;
  }

  if (BCMSPLITRX_ENAB() && (pktp->type == lbuf_rxfrag)) {
    /* If pool is shared rx frag pool, use call back fn to reclaim host
     * address and Rx cpl ID associated with the pkt.
     */
    ASSERT(pktp->cbext.cb != NULL);

    cb = pktp->cbext.cb;
    arg = pktp->cbext.arg;
  } else if ((pktp->type == lbuf_basic) && (pktp->rxcplidfn.cb != NULL)) {
    /* If pool is shared rx pool, use call back fn to freeup Rx cpl ID
     * associated with the pkt.
     */
    cb = pktp->rxcplidfn.cb;
    arg = pktp->rxcplidfn.arg;
  }

  while ((pktp->freelist != NULL) && (free_cnt)) {
    void *p = pktp->freelist;

    pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
    PKTSETFREELIST(p, NULL);

    if (cb != NULL) {
      /* Non-zero return from the callback: restore the packet to the
       * head of the free list and stop reclaiming.
       */
      if (cb(pktp, arg, p, REMOVE_RXCPLID)) {
        PKTSETFREELIST(p, pktp->freelist);
        pktp->freelist = p;
        break;
      }
    }

    PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */

    PKTFREE(osh, p, pktp->istx); /* free the packet */

    freed++;
    free_cnt--;
  }

  /* Shrink both the available and total counts by what was released */
  pktp->avail -= freed;

  pktp->n_pkts -= freed;

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return freed;
  }

  return freed;
}
463 #endif /* #ifdef BCMPOOLRECLAIM */
464
465 /* New API to empty the pkts from pool, but not deinit
466 * NOTE: caller is responsible to ensure,
467 * all pkts are available in pool for free; else LEAK !
468 */
/* Frees every pooled packet back to the heap without de-initing the pool.
 * Caller must guarantee all packets are back in the pool, else they leak
 * (see comment block above).
 */
int pktpool_empty(osl_t *osh, pktpool_t *pktp)
{
  uint16 nfreed = 0;

  ASSERT(osh != NULL);
  ASSERT(pktp != NULL);

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

#ifdef BCMDBG_POOL
  {
    int i;
    for (i = 0; i <= pktp->n_pkts; i++) {
      pktp->dbg_q[i].p = NULL;
    }
  }
#endif // endif

  /* Drain the free list, returning each packet to the heap */
  while (pktp->freelist != NULL) {
    void *head = pktp->freelist;

    pktp->freelist = PKTFREELIST(head); /* pop head off the free list */
    PKTSETFREELIST(head, NULL);

    PKTSETPOOL(osh, head, FALSE, NULL); /* drop the pool tag */

    PKTFREE(osh, head, pktp->istx);

    nfreed++;
    ASSERT(nfreed <= pktp->n_pkts);
  }

  /* Everything must have been in the pool; counters drain to zero */
  pktp->avail -= nfreed;
  ASSERT(pktp->avail == 0);

  pktp->n_pkts -= nfreed;

  ASSERT(pktp->n_pkts == 0);

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
519
/** Pops one packet off the pool's free list; NULL when the pool is dry. */
static void *pktpool_deq(pktpool_t *pktp)
{
  void *head;

  if (pktp->avail == 0) {
    return NULL;
  }

  ASSERT(pktp->freelist != NULL);

  /* Detach the head of the singly-linked free list */
  head = pktp->freelist;
  pktp->freelist = PKTFREELIST(head);
  PKTSETFREELIST(head, NULL);

  pktp->avail--;

  return head;
}
539
/** Pushes a packet onto the head of the pool's free list. */
static void pktpool_enq(pktpool_t *pktp, void *p)
{
  ASSERT(p != NULL);

  /* Link the packet in front of the current head */
  PKTSETFREELIST(p, pktp->freelist);
  pktp->freelist = p;

  pktp->avail++;
  ASSERT(pktp->avail <= pktp->n_pkts); /* never more available than owned */
}
550
551 /** utility for registering host addr fill function called from pciedev */
552 int
553 /* BCMATTACHFN */
554 (pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb,
555 void *arg)
556 {
557 ASSERT(cb != NULL);
558 ASSERT(pktp->cbext.cb == NULL);
559 pktp->cbext.cb = cb;
560 pktp->cbext.arg = arg;
561 return 0;
562 }
563
/** Installs the rx-completion-id reclaim callback for a pool. */
int pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb,
                                  void *arg)
{
  ASSERT(cb != NULL);

  if (pktp == NULL) {
    return BCME_ERROR;
  }

  /* Only a single rxcplid callback is supported per pool */
  ASSERT(pktp->rxcplidfn.cb == NULL);

  pktp->rxcplidfn.cb = cb;
  pktp->rxcplidfn.arg = arg;

  return 0;
}
577
578 /** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
/** Invokes the registered dma-rxfill callback (see
 *  pkpool_haddr_avail_register_cb); tolerates a missing callback in
 *  non-assert builds.
 */
void pktpool_invoke_dmarxfill(pktpool_t *pktp)
{
  ASSERT(pktp->dmarxfill.cb);
  ASSERT(pktp->dmarxfill.arg);

  if (pktp->dmarxfill.cb != NULL) {
    (pktp->dmarxfill.cb)(pktp, pktp->dmarxfill.arg);
  }
}
588
589 /** Registers callback functions for split rx mode */
/** Installs (or replaces) the dma-rxfill callback used for split rx mode. */
int pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
  ASSERT(cb != NULL);

  pktp->dmarxfill.cb = cb;
  pktp->dmarxfill.arg = arg;

  return 0;
}
598
599 /**
600 * Registers callback functions.
601 * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
602 */
/* Registers an avail callback; re-registering an existing (cb, arg) pair
 * only bumps its reference count.
 */
int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
  int err = 0;
  int slot;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(cb != NULL);

  /* Already registered? Just take another reference. */
  for (slot = 0; slot < pktp->cbcnt; slot++) {
    ASSERT(pktp->cbs[slot].cb != NULL);
    if ((pktp->cbs[slot].cb == cb) && (pktp->cbs[slot].arg == arg)) {
      pktp->cbs[slot].refcnt++;
      goto done;
    }
  }

  /* Otherwise append a fresh entry, if the table has room */
  slot = pktp->cbcnt;
  if (slot == PKTPOOL_CB_MAX_AVL) {
    err = BCME_ERROR;
    goto done;
  }

  ASSERT(pktp->cbs[slot].cb == NULL);
  pktp->cbs[slot].cb = cb;
  pktp->cbs[slot].arg = arg;
  pktp->cbs[slot].refcnt++;
  pktp->cbcnt++;

done:
  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return err;
}
644
645 /* No BCMATTACHFN as it is used in a non-attach function */
int pktpool_avail_deregister(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
  int err = 0;
  int i, k;

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(cb != NULL);

  /* Locate the (cb, arg) pair; only the matching entry is touched.
   * NOTE(review): a pair that was never registered falls through with
   * err == 0 — confirm callers rely on this silent success.
   */
  for (i = 0; i < pktp->cbcnt; i++) {
    ASSERT(pktp->cbs[i].cb != NULL);
    if ((cb == pktp->cbs[i].cb) && (arg == pktp->cbs[i].arg)) {
      pktp->cbs[i].refcnt--;
      if (pktp->cbs[i].refcnt) {
        /* Still there are references to this callback */
        goto done;
      }
      /* Refcount hit zero: compact the table by shifting every later
       * entry down one slot ('i' trails 'k' by exactly one).
       */
      for (k = i + 1; k < pktp->cbcnt; i++, k++) {
        pktp->cbs[i].cb = pktp->cbs[k].cb;
        pktp->cbs[i].arg = pktp->cbs[k].arg;
        pktp->cbs[i].refcnt = pktp->cbs[k].refcnt;
      }

      /* reset the last callback ('i' now indexes the vacated tail slot) */
      pktp->cbs[i].cb = NULL;
      pktp->cbs[i].arg = NULL;
      pktp->cbs[i].refcnt = 0;

      pktp->cbcnt--;
      goto done;
    }
  }

done:
  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return err;
}
692
693 /** Registers callback functions */
/** Appends an empty-pool callback (fired from pktpool_empty_notify). */
int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
  int err = 0;
  int slot;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(cb != NULL);

  slot = pktp->ecbcnt;
  if (slot == PKTPOOL_CB_MAX) {
    err = BCME_ERROR; /* table full */
    goto done;
  }

  ASSERT(pktp->ecbs[slot].cb == NULL);
  pktp->ecbs[slot].cb = cb;
  pktp->ecbs[slot].arg = arg;
  pktp->ecbcnt++;

done:
  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return err;
}
726
727 /** Calls registered callback functions */
/** Fires every registered empty-pool callback; pktp->empty flags the
 *  drain in progress so pktpool_free() does not re-notify avail waiters.
 */
static int pktpool_empty_notify(pktpool_t *pktp)
{
  int idx;

  pktp->empty = TRUE;
  for (idx = 0; idx < pktp->ecbcnt; idx++) {
    ASSERT(pktp->ecbs[idx].cb != NULL);
    (pktp->ecbs[idx].cb)(pktp, pktp->ecbs[idx].arg);
  }
  pktp->empty = FALSE;

  return 0;
}
741
742 #ifdef BCMDBG_POOL
/** Appends a debug callback (fired from pktpool_dbg_notify). */
int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
{
  int err = 0;
  int slot;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(cb);

  slot = pktp->dbg_cbcnt;
  if (slot == PKTPOOL_CB_MAX) {
    err = BCME_ERROR; /* table full */
    goto done;
  }

  ASSERT(pktp->dbg_cbs[slot].cb == NULL);
  pktp->dbg_cbs[slot].cb = cb;
  pktp->dbg_cbs[slot].arg = arg;
  pktp->dbg_cbcnt++;

done:
  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return err;
}
775
776 int pktpool_dbg_notify(pktpool_t *pktp);
777
/** Fires every registered debug callback in registration order. */
int pktpool_dbg_notify(pktpool_t *pktp)
{
  int idx;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  for (idx = 0; idx < pktp->dbg_cbcnt; idx++) {
    ASSERT(pktp->dbg_cbs[idx].cb);
    (pktp->dbg_cbs[idx].cb)(pktp, pktp->dbg_cbs[idx].arg);
  }

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
800
/** Dumps the pool's debug queue (pointer, lifetime, state) to the console. */
int pktpool_dbg_dump(pktpool_t *pktp)
{
  int i;

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen);
  for (i = 0; i < pktp->dbg_qlen; i++) {
    ASSERT(pktp->dbg_q[i].p);
    /* BUGFIX: print the packet pointer with %p (was %x, undefined
     * behavior and truncation on 64-bit targets); dur cast to match %lu
     * and the 0x64 divisor spelled as decimal 100.
     */
    printf("%d, p: %p dur:%lu us state:%d\n", i, pktp->dbg_q[i].p,
           (unsigned long)(pktp->dbg_q[i].dur / 100),
           PKTPOOLSTATE(pktp->dbg_q[i].p));
  }

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
825
/** Buckets every debug-queue packet by its pool state into 'stats'. */
int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
{
  int i;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  /* Start from a clean slate, then count packets per state */
  bzero(stats, sizeof(pktpool_stats_t));
  for (i = 0; i < pktp->dbg_qlen; i++) {
    ASSERT(pktp->dbg_q[i].p != NULL);

    switch (PKTPOOLSTATE(pktp->dbg_q[i].p)) {
      case POOL_TXENQ:
        stats->enq++;
        break;
      case POOL_TXDH:
        stats->txdh++;
        break;
      case POOL_TXD11:
        stats->txd11++;
        break;
      case POOL_RXDH:
        stats->rxdh++;
        break;
      case POOL_RXD11:
        stats->rxd11++;
        break;
      case POOL_RXFILL:
        stats->rxfill++;
        break;
      case POOL_IDLE:
        stats->idle++;
        break;
      default:
        break; /* unknown states are not counted */
    }
  }

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
874
/** Stamps packet 'p' with the current cycle count so a later
 *  pktpool_stop_trigger() can compute its lifetime.
 */
int pktpool_start_trigger(pktpool_t *pktp, void *p)
{
  uint32 now, i;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  /* Only pool-tagged packets are tracked */
  if (!PKTPOOL(OSH_NULL, p)) {
    goto done;
  }

  OSL_GETCYCLES(now);

  for (i = 0; i < pktp->dbg_qlen; i++) {
    ASSERT(pktp->dbg_q[i].p != NULL);

    if (pktp->dbg_q[i].p == p) {
      pktp->dbg_q[i].cycles = now;
      break;
    }
  }

done:
  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
908
909 int pktpool_stop_trigger(pktpool_t *pktp, void *p);
910
int pktpool_stop_trigger(pktpool_t *pktp, void *p)
{
  uint32 cycles, i;

  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  /* Only pool-tagged packets are tracked */
  if (!PKTPOOL(OSH_NULL, p)) {
    goto done;
  }

  OSL_GETCYCLES(cycles);

  for (i = 0; i < pktp->dbg_qlen; i++) {
    ASSERT(pktp->dbg_q[i].p != NULL);

    if (pktp->dbg_q[i].p == p) {
      /* A zero start stamp means pktpool_start_trigger() never ran */
      if (pktp->dbg_q[i].cycles == 0) {
        break;
      }

      if (cycles >= pktp->dbg_q[i].cycles) {
        pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
      } else {
        /* 32-bit cycle counter wrapped between start and stop */
        pktp->dbg_q[i].dur =
            (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
      }

      pktp->dbg_q[i].cycles = 0; /* re-arm for the next measurement */
      break;
    }
  }

done:
  /* protect shared resource */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
955 #endif /* BCMDBG_POOL */
956
/** Drops any exclusive-callback claim so all avail callbacks fire again. */
int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
{
  BCM_REFERENCE(osh);
  ASSERT(pktp);

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  pktp->availcb_excl = NULL;

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return 0;
}
977
/** Makes the already-registered callback 'cb' the exclusive receiver of
 *  avail notifications; BCME_ERROR if 'cb' was never registered.
 */
int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
{
  int slot;
  int err;
  BCM_REFERENCE(osh);

  ASSERT(pktp);

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(pktp->availcb_excl == NULL);

  /* Find the registered entry matching 'cb' and mark it exclusive */
  for (slot = 0; slot < pktp->cbcnt; slot++) {
    if (cb == pktp->cbs[slot].cb) {
      pktp->availcb_excl = &pktp->cbs[slot];
      break;
    }
  }

  err = (pktp->availcb_excl == NULL) ? BCME_ERROR : 0;

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return err;
}
1013
static void pktpool_avail_notify(pktpool_t *pktp)
{
  int i, k, idx;
  int avail;

  ASSERT(pktp);
  /* An exclusive callback, when set, preempts all other callbacks */
  if (pktp->availcb_excl != NULL) {
    pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
    return;
  }

  k = pktp->cbcnt - 1;
  for (i = 0; i < pktp->cbcnt; i++) {
    /* Re-read avail each iteration: a callback may consume packets */
    avail = pktp->avail;

    if (avail) {
      /* cbtoggle selects the walk direction: forward via 'i', or
       * backward via 'k--'.
       */
      if (pktp->cbtoggle) {
        idx = i;
      } else {
        idx = k--;
      }

      ASSERT(pktp->cbs[idx].cb != NULL);
      pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
    }
  }

  /* Alternate between filling from head or tail
   */
  pktp->cbtoggle ^= 1;

  return;
}
1047
1048 /** Gets an empty packet from the caller provided pool */
/** Dequeues an empty packet from the pool; NULL when none can be had. */
void *pktpool_get(pktpool_t *pktp)
{
  void *p;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return NULL;
  }

  p = pktpool_deq(pktp);
  if (p == NULL) {
    /* Pool ran dry: ask empty-callbacks to reclaim tx packets, then
     * retry the dequeue once.
     */
    if (pktp->ecbcnt) {
      pktpool_empty_notify(pktp);
    }
    p = pktpool_deq(pktp);
  }

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return NULL;
  }

  return p;
}
1079
/** Returns packet 'p' to its pool and, when permitted, notifies waiters. */
void pktpool_free(pktpool_t *pktp, void *p)
{
  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return;
  }

  ASSERT(p != NULL);
#ifdef BCMDBG_POOL

#endif // endif

  /* Put the packet back on the free list */
  pktpool_enq(pktp, p);

  /* Feed critical DMA with freshly freed packets, to avoid DMA
   * starvation: when avail callbacks are registered and currently
   * enabled, notify consumers that a packet became available.
   * Notification is suppressed while pktpool_empty_notify() is draining
   * (pktp->empty) because the freed packet is wanted by the in-flight
   * pktpool_get(). With callbacks temporarily disabled, just record that
   * a free happened so it can be replayed on re-enable — this allows
   * burst-based feeding instead of per-packet callbacks.
   */
  if (pktp->cbcnt) {
    if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
      if (pktp->empty == FALSE) {
        pktpool_avail_notify(pktp);
      }
    } else {
      pktp->emptycb_disable = EMPTYCB_SKIPPED;
    }
  }

  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return;
  }
}
1128
1129 /** Adds a caller provided (empty) packet to the caller provided pool */
/** Adds a caller-provided (empty) packet to the caller-provided pool. */
int pktpool_add(pktpool_t *pktp, void *p)
{
  int err = 0;

  /* serialize access to the pool */
  if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
      OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  ASSERT(p != NULL);

  /* Refuse to grow past the configured ceiling */
  if (pktp->n_pkts == pktp->maxlen) {
    err = BCME_RANGE;
    goto done;
  }

  /* All packets in a pool share a single size */
  ASSERT(pktp->max_pkt_bytes == PKTLEN(OSH_NULL, p));
  PKTSETPOOL(OSH_NULL, p, TRUE, pktp); /* tag the packet as pool-owned */

  pktp->n_pkts++;
  pktpool_enq(pktp, p);

#ifdef BCMDBG_POOL
  pktp->dbg_q[pktp->dbg_qlen++].p = p;
#endif // endif

done:
  /* release the pool lock */
  if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
    return BCME_ERROR;
  }

  return err;
}
1166
1167 /**
1168 * Force pktpool_setmaxlen () into RAM as it uses a constant
1169 * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
1170 */
BCMRAMFN(pktpool_setmaxlen)1171 int BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
1172 {
1173 /* protect shared resource */
1174 if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) !=
1175 OSL_EXT_SUCCESS) {
1176 return BCME_ERROR;
1177 }
1178
1179 if (maxlen > PKTPOOL_LEN_MAX) {
1180 maxlen = PKTPOOL_LEN_MAX;
1181 }
1182
1183 /* if pool is already beyond maxlen, then just cap it
1184 * since we currently do not reduce the pool len
1185 * already allocated
1186 */
1187 pktp->maxlen = (pktp->n_pkts > maxlen) ? pktp->n_pkts : maxlen;
1188
1189 /* protect shared resource */
1190 if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS) {
1191 return BCME_ERROR;
1192 }
1193
1194 return pktp->maxlen;
1195 }
1196
/**
 * Enables or temporarily disables the avail-callback of a pool.
 *
 * While the callback is disabled, frees into the pool are only logged
 * (EMPTYCB_SKIPPED); on re-enable, a pending skipped notification is
 * delivered now, provided callbacks are registered and this call did not
 * originate from pktpool_empty_notify.
 */
void pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
{
	ASSERT(pktp);

	if (disable) {
		/* Temporarily suppress the avail callback. */
		pktp->emptycb_disable = EMPTYCB_DISABLED;
		return;
	}

	/* Re-enabling: if a packet was freed while the callback was off,
	 * deliver the deferred notification first.
	 */
	if ((pktp->cbcnt != 0) && (pktp->empty == FALSE) &&
	    (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
		pktpool_avail_notify(pktp);
	}

	pktp->emptycb_disable = EMPTYCB_ENABLED;
}
1217
pktpool_emptycb_disabled(pktpool_t * pktp)1218 bool pktpool_emptycb_disabled(pktpool_t *pktp)
1219 {
1220 ASSERT(pktp);
1221 return pktp->emptycb_disable != EMPTYCB_ENABLED;
1222 }
1223
#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

/* Shared pool of plain (lbuf_basic) packets; allocated in hnd_pktpool_init */
pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
/* Shared pool of tx fragments (lbuf_frag) */
pktpool_t *pktpool_shared_lfrag = NULL;
#ifdef BCMRESVFRAGPOOL
/* Reserved tx fragment pool; left empty at init and only carved out after
 * reclaim (see hnd_pktpool_refill)
 */
pktpool_t *pktpool_resv_lfrag = NULL;
struct resv_info *resv_pool_info = NULL;
#endif /* BCMRESVFRAGPOOL */
#endif /* BCMFRAGPOOL */

/* Shared pool of rx fragments (lbuf_rxfrag) */
pktpool_t *pktpool_shared_rxlfrag = NULL;

/* OS handle captured by hnd_pktpool_init; used by the fill/refill wrappers */
static osl_t *pktpool_osh = NULL;
1240
1241 /**
1242 * Initializes several packet pools and allocates packets within those pools.
1243 */
hnd_pktpool_init(osl_t * osh)1244 int hnd_pktpool_init(osl_t *osh)
1245 {
1246 int err = BCME_OK;
1247 int n;
1248
1249 /* Construct a packet pool registry before initializing packet pools */
1250 n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
1251 if (n != PKTPOOL_MAXIMUM_ID) {
1252 ASSERT(0);
1253 err = BCME_ERROR;
1254 goto error0;
1255 }
1256
1257 pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
1258 if (pktpool_shared == NULL) {
1259 ASSERT(0);
1260 err = BCME_NOMEM;
1261 goto error1;
1262 }
1263
1264 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1265 pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
1266 if (pktpool_shared_lfrag == NULL) {
1267 ASSERT(0);
1268 err = BCME_NOMEM;
1269 goto error2;
1270 }
1271 #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1272 resv_pool_info = hnd_resv_pool_alloc(osh);
1273 if (resv_pool_info == NULL) {
1274 ASSERT(0);
1275 goto error2;
1276 }
1277 pktpool_resv_lfrag = resv_pool_info->pktp;
1278 if (pktpool_resv_lfrag == NULL) {
1279 ASSERT(0);
1280 goto error2;
1281 }
1282 #endif /* RESVFRAGPOOL */
1283 #endif /* FRAGPOOL */
1284
1285 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1286 pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
1287 if (pktpool_shared_rxlfrag == NULL) {
1288 ASSERT(0);
1289 err = BCME_NOMEM;
1290 goto error3;
1291 }
1292 #endif // endif
1293
1294 /*
1295 * At this early stage, there's not enough memory to allocate all
1296 * requested pkts in the shared pool. Need to add to the pool
1297 * after reclaim
1298 *
1299 * n = NRXBUFPOST + SDPCMD_RXBUFS;
1300 *
1301 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
1302 * registry is not initialized or the registry is depleted.
1303 *
1304 * A BCME_NOMEM error only indicates that the requested number of packets
1305 * were not filled into the pool.
1306 */
1307 n = 1;
1308 MALLOC_SET_NOPERSIST(
1309 osh); /* Ensure subsequent allocations are non-persist */
1310 if ((err = pktpool_init(osh, pktpool_shared, &n, PKTBUFSZ, FALSE,
1311 lbuf_basic)) != BCME_OK) {
1312 ASSERT(0);
1313 goto error4;
1314 }
1315 pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);
1316
1317 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1318 n = 1;
1319 if ((err = pktpool_init(osh, pktpool_shared_lfrag, &n, PKTFRAGSZ, TRUE,
1320 lbuf_frag)) != BCME_OK) {
1321 ASSERT(0);
1322 goto error5;
1323 }
1324 pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
1325 #if defined(BCMRESVFRAGPOOL) && !defined(BCMRESVFRAGPOOL_DISABLED)
1326 n = 0; /* IMPORTANT: DO NOT allocate any packets in resv pool */
1327 if (pktpool_init(osh, pktpool_resv_lfrag, &n, PKTFRAGSZ, TRUE, lbuf_frag) ==
1328 BCME_ERROR) {
1329 ASSERT(0);
1330 goto error5;
1331 }
1332 pktpool_setmaxlen(pktpool_resv_lfrag, RESV_FRAG_POOL_LEN);
1333 #endif /* RESVFRAGPOOL */
1334 #endif /* BCMFRAGPOOL */
1335 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1336 n = 1;
1337 if ((err = pktpool_init(osh, pktpool_shared_rxlfrag, &n, PKTRXFRAGSZ, TRUE,
1338 lbuf_rxfrag)) != BCME_OK) {
1339 ASSERT(0);
1340 goto error6;
1341 }
1342 pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
1343 #endif // endif
1344
1345 #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1346 /* Attach poolreorg module */
1347 if ((frwd_poolreorg_info = poolreorg_attach(osh,
1348 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1349 pktpool_shared_lfrag,
1350 #else
1351 NULL,
1352 #endif // endif
1353 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1354 pktpool_shared_rxlfrag,
1355 #else
1356 NULL,
1357 #endif // endif
1358 pktpool_shared)) == NULL) {
1359 ASSERT(0);
1360 goto error7;
1361 }
1362 #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1363
1364 pktpool_osh = osh;
1365 MALLOC_CLEAR_NOPERSIST(osh);
1366
1367 return BCME_OK;
1368
1369 #if defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED)
1370 /* detach poolreorg module */
1371 poolreorg_detach(frwd_poolreorg_info);
1372 error7:
1373 #endif /* defined(BCMFRWDPOOLREORG) && !defined(BCMFRWDPOOLREORG_DISABLED) */
1374
1375 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1376 pktpool_deinit(osh, pktpool_shared_rxlfrag);
1377 error6:
1378 #endif // endif
1379
1380 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1381 pktpool_deinit(osh, pktpool_shared_lfrag);
1382 error5:
1383 #endif // endif
1384
1385 #if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
1386 (defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
1387 pktpool_deinit(osh, pktpool_shared);
1388 #endif // endif
1389
1390 error4:
1391 #if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
1392 hnd_free(pktpool_shared_rxlfrag);
1393 pktpool_shared_rxlfrag = (pktpool_t *)NULL;
1394 error3:
1395 #endif /* BCMRXFRAGPOOL */
1396
1397 #if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
1398 hnd_free(pktpool_shared_lfrag);
1399 pktpool_shared_lfrag = (pktpool_t *)NULL;
1400 error2:
1401 #endif /* BCMFRAGPOOL */
1402
1403 hnd_free(pktpool_shared);
1404 pktpool_shared = (pktpool_t *)NULL;
1405
1406 error1:
1407 pktpool_dettach(osh);
1408 error0:
1409 MALLOC_CLEAR_NOPERSIST(osh);
1410 return err;
1411 } /* hnd_pktpool_init */
1412
1413 /** is called at each 'wl up' */
hnd_pktpool_fill(pktpool_t * pktpool,bool minimal)1414 int hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
1415 {
1416 return (pktpool_fill(pktpool_osh, pktpool, minimal));
1417 }
1418
/** refills pktpools after reclaim, is called once */
void hnd_pktpool_refill(bool minimal)
{
	if (POOL_ENAB(pktpool_shared)) {
#if defined(SRMEM)
		if (SRMEM_ENAB()) {
			int maxlen = pktpool_max_pkts(pktpool_shared);
			int n_pkts = pktpool_tot_pkts(pktpool_shared);

			/* top up the shared pool from SR memory until the pool
			 * is full or SR memory runs out of packets
			 */
			for (; n_pkts < maxlen; n_pkts++) {
				void *p;
				if ((p = PKTSRGET(pktpool_max_pkt_bytes(pktpool_shared))) ==
				    NULL) {
					break;
				}
				pktpool_add(pktpool_shared, p);
			}
		}
#endif /* SRMEM */
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
	/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */
	/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif /* BCMRXFRAGPOOL */
#if defined(BCMFRAGPOOL) && defined(BCMRESVFRAGPOOL)
	if (POOL_ENAB(pktpool_resv_lfrag)) {
		/* carve out the reserved pool's backing memory now that
		 * reclaim has freed it; size covers payload plus lbuf overhead
		 */
		int resv_size = (PKTFRAGSZ + LBUFFRAGSZ) * RESV_FRAG_POOL_LEN;
		hnd_resv_pool_init(resv_pool_info, resv_size);
		hnd_resv_pool_enable(resv_pool_info);
	}
#endif /* BCMFRAGPOOL && BCMRESVFRAGPOOL */
}
1460 #endif /* BCMPKTPOOL */
1461