• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * HND generic packet pool operation primitives
3  *
4  * Copyright (C) 1999-2017, Broadcom Corporation
5  *
6  *      Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  *
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions of
16  * the license of that module.  An independent module is a module which is not
17  * derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  *
20  *      Notwithstanding the above, under no circumstances may you combine this
21  * software in any way with any other Broadcom software provided under a license
22  * other than the GPL, without Broadcom's express prior written consent.
23  *
24  *
25  * <<Broadcom-WL-IPTag/Open:>>
26  *
27  * $Id: hnd_pktpool.c 613891 2016-01-20 10:05:44Z $
28  */
29 
30 #include <typedefs.h>
31 #include <osl.h>
32 #include <osl_ext.h>
33 #include <bcmutils.h>
34 #include <hnd_pktpool.h>
35 
36 
/* Mutex wrappers for thread safety: expand to real OSL mutex operations when
 * HND_PKTPOOL_THREAD_SAFE is defined, otherwise to success no-ops so callers
 * can unconditionally check the OSL_EXT_SUCCESS result.
 */
#ifdef HND_PKTPOOL_THREAD_SAFE
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)    osl_ext_mutex_create(name, mutex)
#define HND_PKTPOOL_MUTEX_DELETE(mutex)        osl_ext_mutex_delete(mutex)
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)    osl_ext_mutex_acquire(mutex, msec)
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)    osl_ext_mutex_release(mutex)
#else
#define HND_PKTPOOL_MUTEX_CREATE(name, mutex)    OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_DELETE(mutex)        OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_ACQUIRE(mutex, msec)    OSL_EXT_SUCCESS
#define HND_PKTPOOL_MUTEX_RELEASE(mutex)    OSL_EXT_SUCCESS
#endif
49 
/* Registry size is one larger than max pools, as slot #0 is reserved */
#define PKTPOOLREG_RSVD_ID                (0U)
#define PKTPOOLREG_RSVD_PTR                (POOLPTR(0xdeaddead))
#define PKTPOOLREG_FREE_PTR                (POOLPTR(NULL))

/* Accessors route through BCMRAMFN helpers so the array stays patchable in ROM builds */
#define PKTPOOL_REGISTRY_SET(id, pp)    (pktpool_registry_set((id), (pp)))
#define PKTPOOL_REGISTRY_CMP(id, pp)    (pktpool_registry_cmp((id), (pp)))

/* Tag a registry entry as free for use */
#define PKTPOOL_REGISTRY_CLR(id)        \
        PKTPOOL_REGISTRY_SET((id), PKTPOOLREG_FREE_PTR)
#define PKTPOOL_REGISTRY_ISCLR(id)        \
        (PKTPOOL_REGISTRY_CMP((id), PKTPOOLREG_FREE_PTR))

/* Tag registry entry 0 as reserved */
#define PKTPOOL_REGISTRY_RSV()            \
        PKTPOOL_REGISTRY_SET(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR)
#define PKTPOOL_REGISTRY_ISRSVD()        \
        (PKTPOOL_REGISTRY_CMP(PKTPOOLREG_RSVD_ID, PKTPOOLREG_RSVD_PTR))

/* Walk all un-reserved entries in registry (NOTE: bound is the pktpools_max global) */
#define PKTPOOL_REGISTRY_FOREACH(id)    \
        for ((id) = 1U; (id) <= pktpools_max; (id)++)
73 
/* State of the "packet became available" callback gating, see pktpool_emptycb_disable() */
enum pktpool_empty_cb_state {
    EMPTYCB_ENABLED = 0,    /* Enable callback when new packets are added to pool */
    EMPTYCB_DISABLED,    /* Disable callback when new packets are added to pool */
    EMPTYCB_SKIPPED        /* Packet was added to pool when callback was disabled */
};
79 
uint32 pktpools_max = 0U; /* maximum number of pools that may be initialized */
pktpool_t *pktpools_registry[PKTPOOL_MAXIMUM_ID + 1]; /* Pktpool registry; slot #0 reserved */

/* Register/Deregister a pktpool with registry during pktpool_init/deinit */
static int pktpool_register(pktpool_t * poolptr);
static int pktpool_deregister(pktpool_t * poolptr);

/* Forward declaration: invokes all registered "packet available" callbacks */
static void pktpool_avail_notify(pktpool_t *pktp);
89 
90 /** accessor functions required when ROMming this file, forced into RAM */
91 
92 
93 pktpool_t *
BCMRAMFN(get_pktpools_registry)94 BCMRAMFN(get_pktpools_registry)(int id)
95 {
96     return pktpools_registry[id];
97 }
98 
99 static void
BCMRAMFN(pktpool_registry_set)100 BCMRAMFN(pktpool_registry_set)(int id, pktpool_t *pp)
101 {
102     pktpools_registry[id] = pp;
103 }
104 
105 static bool
BCMRAMFN(pktpool_registry_cmp)106 BCMRAMFN(pktpool_registry_cmp)(int id, pktpool_t *pp)
107 {
108     return pktpools_registry[id] == pp;
109 }
110 
111 /** Constructs a pool registry to serve a maximum of total_pools */
112 int
pktpool_attach(osl_t * osh,uint32 total_pools)113 pktpool_attach(osl_t *osh, uint32 total_pools)
114 {
115     uint32 poolid;
116     BCM_REFERENCE(osh);
117 
118     if (pktpools_max != 0U) {
119         return BCME_ERROR;
120     }
121 
122     ASSERT(total_pools <= PKTPOOL_MAXIMUM_ID);
123 
124     /* Initialize registry: reserve slot#0 and tag others as free */
125     PKTPOOL_REGISTRY_RSV();        /* reserve slot#0 */
126 
127     PKTPOOL_REGISTRY_FOREACH(poolid) {    /* tag all unreserved entries as free */
128         PKTPOOL_REGISTRY_CLR(poolid);
129     }
130 
131     pktpools_max = total_pools;
132 
133     return (int)pktpools_max;
134 }
135 
136 /** Destructs the pool registry. Ascertain all pools were first de-inited */
137 int
pktpool_dettach(osl_t * osh)138 pktpool_dettach(osl_t *osh)
139 {
140     uint32 poolid;
141     BCM_REFERENCE(osh);
142 
143     if (pktpools_max == 0U) {
144         return BCME_OK;
145     }
146 
147     /* Ascertain that no pools are still registered */
148     ASSERT(PKTPOOL_REGISTRY_ISRSVD()); /* assert reserved slot */
149 
150     PKTPOOL_REGISTRY_FOREACH(poolid) {    /* ascertain all others are free */
151         ASSERT(PKTPOOL_REGISTRY_ISCLR(poolid));
152     }
153 
154     pktpools_max = 0U; /* restore boot state */
155 
156     return BCME_OK;
157 }
158 
159 /** Registers a pool in a free slot; returns the registry slot index */
160 static int
pktpool_register(pktpool_t * poolptr)161 pktpool_register(pktpool_t * poolptr)
162 {
163     uint32 poolid;
164 
165     if (pktpools_max == 0U) {
166         return PKTPOOL_INVALID_ID; /* registry has not yet been constructed */
167     }
168 
169     ASSERT(pktpools_max != 0U);
170 
171     /* find an empty slot in pktpools_registry */
172     PKTPOOL_REGISTRY_FOREACH(poolid) {
173         if (PKTPOOL_REGISTRY_ISCLR(poolid)) {
174             PKTPOOL_REGISTRY_SET(poolid, POOLPTR(poolptr)); /* register pool */
175             return (int)poolid; /* return pool ID */
176         }
177     } /* FOREACH */
178 
179     return PKTPOOL_INVALID_ID;    /* error: registry is full */
180 }
181 
182 /** Deregisters a pktpool, given the pool pointer; tag slot as free */
183 static int
pktpool_deregister(pktpool_t * poolptr)184 pktpool_deregister(pktpool_t * poolptr)
185 {
186     uint32 poolid;
187 
188     ASSERT(POOLPTR(poolptr) != POOLPTR(NULL));
189 
190     poolid = POOLID(poolptr);
191     ASSERT(poolid <= pktpools_max);
192 
193     /* Asertain that a previously registered poolptr is being de-registered */
194     if (PKTPOOL_REGISTRY_CMP(poolid, POOLPTR(poolptr))) {
195         PKTPOOL_REGISTRY_CLR(poolid); /* mark as free */
196     } else {
197         ASSERT(0);
198         return BCME_ERROR; /* mismatch in registry */
199     }
200 
201     return BCME_OK;
202 }
203 
204 /**
205  * pktpool_init:
206  * User provides a pktpool_t structure and specifies the number of packets to
207  * be pre-filled into the pool (pplen).
208  * pktpool_init first attempts to register the pool and fetch a unique poolid.
209  * If registration fails, it is considered an BCME_ERR, caused by either the
210  * registry was not pre-created (pktpool_attach) or the registry is full.
211  * If registration succeeds, then the requested number of packets will be filled
212  * into the pool as part of initialization. In the event that there is no
213  * available memory to service the request, then BCME_NOMEM will be returned
214  * along with the count of how many packets were successfully allocated.
215  * In dongle builds, prior to memory reclaimation, one should limit the number
216  * of packets to be allocated during pktpool_init and fill the pool up after
217  * reclaim stage.
218  *
219  * @param pplen  Number of packets to be pre-filled into the pool
220  * @param plen   The size of all packets in a pool must be the same, [bytes] units. E.g. PKTBUFSZ.
221  * @param type   e.g. 'lbuf_frag'
222  */
223 int
pktpool_init(osl_t * osh,pktpool_t * pktp,int * pplen,int plen,bool istx,uint8 type)224 pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx, uint8 type)
225 {
226     int i, err = BCME_OK;
227     int pktplen;
228     uint8 pktp_id;
229 
230     ASSERT(pktp != NULL);
231     ASSERT(osh != NULL);
232     ASSERT(pplen != NULL);
233 
234     pktplen = *pplen;
235 
236     bzero(pktp, sizeof(pktpool_t));
237 
238     /* assign a unique pktpool id */
239     if ((pktp_id = (uint8) pktpool_register(pktp)) == PKTPOOL_INVALID_ID) {
240         return BCME_ERROR;
241     }
242     POOLSETID(pktp, pktp_id);
243 
244     pktp->inited = TRUE;
245     pktp->istx = istx ? TRUE : FALSE;
246     pktp->plen = (uint16)plen;
247     pktp->type = type;
248 
249     if (HND_PKTPOOL_MUTEX_CREATE("pktpool", &pktp->mutex) != OSL_EXT_SUCCESS) {
250         return BCME_ERROR;
251     }
252 
253     pktp->maxlen = PKTPOOL_LEN_MAX;
254     pktplen = LIMIT_TO_MAX(pktplen, pktp->maxlen);
255 
256     for (i = 0; i < pktplen; i++) {
257         void *p;
258         p = PKTGET(osh, plen, TRUE);
259 
260         if (p == NULL) {
261             /* Not able to allocate all requested pkts
262              * so just return what was actually allocated
263              * We can add to the pool later
264              */
265             if (pktp->freelist == NULL) /* pktpool free list is empty */
266                 err = BCME_NOMEM;
267 
268             goto exit;
269         }
270 
271         PKTSETPOOL(osh, p, TRUE, pktp); /* Tag packet with pool ID */
272 
273         PKTSETFREELIST(p, pktp->freelist); /* insert p at head of free list */
274         pktp->freelist = p;
275 
276         pktp->avail++;
277 
278 #ifdef BCMDBG_POOL
279         pktp->dbg_q[pktp->dbg_qlen++].p = p;
280 #endif
281     }
282 
283 exit:
284     pktp->len = pktp->avail;
285 
286     *pplen = pktp->len; /* number of packets managed by pool */
287     return err;
288 } /* pktpool_init */
289 
290 /**
291  * pktpool_deinit:
292  * Prior to freeing a pktpool, all packets must be first freed into the pktpool.
293  * Upon pktpool_deinit, all packets in the free pool will be freed to the heap.
294  * An assert is in place to ensure that there are no packets still lingering
295  * around. Packets freed to a pool after the deinit will cause a memory
296  * corruption as the pktpool_t structure no longer exists.
297  */
298 int
pktpool_deinit(osl_t * osh,pktpool_t * pktp)299 pktpool_deinit(osl_t *osh, pktpool_t *pktp)
300 {
301     uint16 freed = 0;
302 
303     ASSERT(osh != NULL);
304     ASSERT(pktp != NULL);
305 
306 #ifdef BCMDBG_POOL
307     {
308         int i;
309         for (i = 0; i <= pktp->len; i++) {
310             pktp->dbg_q[i].p = NULL;
311         }
312     }
313 #endif
314 
315     while (pktp->freelist != NULL) {
316         void * p = pktp->freelist;
317 
318         pktp->freelist = PKTFREELIST(p); /* unlink head packet from free list */
319         PKTSETFREELIST(p, NULL);
320 
321         PKTSETPOOL(osh, p, FALSE, NULL); /* clear pool ID tag in pkt */
322 
323         PKTFREE(osh, p, pktp->istx); /* free the packet */
324 
325         freed++;
326         ASSERT(freed <= pktp->len);
327     }
328 
329     pktp->avail -= freed;
330     ASSERT(pktp->avail == 0);
331 
332     pktp->len -= freed;
333 
334     pktpool_deregister(pktp); /* release previously acquired unique pool id */
335     POOLSETID(pktp, PKTPOOL_INVALID_ID);
336 
337     if (HND_PKTPOOL_MUTEX_DELETE(&pktp->mutex) != OSL_EXT_SUCCESS)
338         return BCME_ERROR;
339 
340     pktp->inited = FALSE;
341 
342     /* Are there still pending pkts? */
343     ASSERT(pktp->len == 0);
344 
345     return 0;
346 }
347 
348 int
pktpool_fill(osl_t * osh,pktpool_t * pktp,bool minimal)349 pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal)
350 {
351     void *p;
352     int err = 0;
353     int len, psize, maxlen;
354 
355     /* protect shared resource */
356     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
357         return BCME_ERROR;
358 
359     ASSERT(pktp->plen != 0);
360 
361     maxlen = pktp->maxlen;
362     psize = minimal ? (maxlen >> 2) : maxlen;
363     for (len = (int)pktp->len; len < psize; len++) {
364         p = PKTGET(osh, pktp->len, TRUE);
365 
366         if (p == NULL) {
367             err = BCME_NOMEM;
368             break;
369         }
370 
371         if (pktpool_add(pktp, p) != BCME_OK) {
372             PKTFREE(osh, p, FALSE);
373             err = BCME_ERROR;
374             break;
375         }
376     }
377 
378     /* protect shared resource */
379     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
380         return BCME_ERROR;
381 
382     if (pktp->cbcnt) {
383         if (pktp->empty == FALSE)
384             pktpool_avail_notify(pktp);
385     }
386 
387     return err;
388 }
389 
390 static void *
pktpool_deq(pktpool_t * pktp)391 pktpool_deq(pktpool_t *pktp)
392 {
393     void *p = NULL;
394 
395     if (pktp->avail == 0)
396         return NULL;
397 
398     ASSERT(pktp->freelist != NULL);
399 
400     p = pktp->freelist;  /* dequeue packet from head of pktpool free list */
401     pktp->freelist = PKTFREELIST(p); /* free list points to next packet */
402 
403 
404     PKTSETFREELIST(p, NULL);
405 
406     pktp->avail--;
407 
408     return p;
409 }
410 
411 static void
pktpool_enq(pktpool_t * pktp,void * p)412 pktpool_enq(pktpool_t *pktp, void *p)
413 {
414     ASSERT(p != NULL);
415 
416     PKTSETFREELIST(p, pktp->freelist); /* insert at head of pktpool free list */
417     pktp->freelist = p; /* free list points to newly inserted packet */
418 
419 
420     pktp->avail++;
421     ASSERT(pktp->avail <= pktp->len);
422 }
423 
424 /** utility for registering host addr fill function called from pciedev */
425 int
426 /* BCMATTACHFN */
427 (pktpool_hostaddr_fill_register)(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
428 {
429     ASSERT(cb != NULL);
430 
431     ASSERT(pktp->cbext.cb == NULL);
432     pktp->cbext.cb = cb;
433     pktp->cbext.arg = arg;
434     return 0;
435 }
436 
437 int
pktpool_rxcplid_fill_register(pktpool_t * pktp,pktpool_cb_extn_t cb,void * arg)438 pktpool_rxcplid_fill_register(pktpool_t *pktp, pktpool_cb_extn_t cb, void *arg)
439 {
440     ASSERT(cb != NULL);
441 
442     if (pktp == NULL)
443         return BCME_ERROR;
444     ASSERT(pktp->rxcplidfn.cb == NULL);
445     pktp->rxcplidfn.cb = cb;
446     pktp->rxcplidfn.arg = arg;
447     return 0;
448 }
449 
450 /** whenever host posts rxbuffer, invoke dma_rxfill from pciedev layer */
451 void
pktpool_invoke_dmarxfill(pktpool_t * pktp)452 pktpool_invoke_dmarxfill(pktpool_t *pktp)
453 {
454     ASSERT(pktp->dmarxfill.cb);
455     ASSERT(pktp->dmarxfill.arg);
456 
457     if (pktp->dmarxfill.cb)
458         pktp->dmarxfill.cb(pktp, pktp->dmarxfill.arg);
459 }
460 
461 /** Registers callback functions for split rx mode */
462 int
pkpool_haddr_avail_register_cb(pktpool_t * pktp,pktpool_cb_t cb,void * arg)463 pkpool_haddr_avail_register_cb(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
464 {
465     ASSERT(cb != NULL);
466 
467     pktp->dmarxfill.cb = cb;
468     pktp->dmarxfill.arg = arg;
469 
470     return 0;
471 }
472 
473 /**
474  * Registers callback functions.
475  * No BCMATTACHFN as it is used in xdc_enable_ep which is not an attach function
476  */
477 int
pktpool_avail_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)478 pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
479 {
480     int err = 0;
481     int i;
482 
483     /* protect shared resource */
484     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
485         return BCME_ERROR;
486 
487     ASSERT(cb != NULL);
488 
489     i = pktp->cbcnt;
490     if (i == PKTPOOL_CB_MAX_AVL) {
491         err = BCME_ERROR;
492         goto done;
493     }
494 
495     ASSERT(pktp->cbs[i].cb == NULL);
496     pktp->cbs[i].cb = cb;
497     pktp->cbs[i].arg = arg;
498     pktp->cbcnt++;
499 
500 done:
501     /* protect shared resource */
502     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
503         return BCME_ERROR;
504 
505     return err;
506 }
507 
508 /** Registers callback functions */
509 int
pktpool_empty_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)510 pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
511 {
512     int err = 0;
513     int i;
514 
515     /* protect shared resource */
516     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
517         return BCME_ERROR;
518 
519     ASSERT(cb != NULL);
520 
521     i = pktp->ecbcnt;
522     if (i == PKTPOOL_CB_MAX) {
523         err = BCME_ERROR;
524         goto done;
525     }
526 
527     ASSERT(pktp->ecbs[i].cb == NULL);
528     pktp->ecbs[i].cb = cb;
529     pktp->ecbs[i].arg = arg;
530     pktp->ecbcnt++;
531 
532 done:
533     /* protect shared resource */
534     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
535         return BCME_ERROR;
536 
537     return err;
538 }
539 
540 /** Calls registered callback functions */
541 static int
pktpool_empty_notify(pktpool_t * pktp)542 pktpool_empty_notify(pktpool_t *pktp)
543 {
544     int i;
545 
546     pktp->empty = TRUE;
547     for (i = 0; i < pktp->ecbcnt; i++) {
548         ASSERT(pktp->ecbs[i].cb != NULL);
549         pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg);
550     }
551     pktp->empty = FALSE;
552 
553     return 0;
554 }
555 
556 #ifdef BCMDBG_POOL
557 int
pktpool_dbg_register(pktpool_t * pktp,pktpool_cb_t cb,void * arg)558 pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg)
559 {
560     int err = 0;
561     int i;
562 
563     /* protect shared resource */
564     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
565         return BCME_ERROR;
566 
567     ASSERT(cb);
568 
569     i = pktp->dbg_cbcnt;
570     if (i == PKTPOOL_CB_MAX) {
571         err = BCME_ERROR;
572         goto done;
573     }
574 
575     ASSERT(pktp->dbg_cbs[i].cb == NULL);
576     pktp->dbg_cbs[i].cb = cb;
577     pktp->dbg_cbs[i].arg = arg;
578     pktp->dbg_cbcnt++;
579 
580 done:
581     /* protect shared resource */
582     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
583         return BCME_ERROR;
584 
585     return err;
586 }
587 
588 int pktpool_dbg_notify(pktpool_t *pktp);
589 
590 int
pktpool_dbg_notify(pktpool_t * pktp)591 pktpool_dbg_notify(pktpool_t *pktp)
592 {
593     int i;
594 
595     /* protect shared resource */
596     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
597         return BCME_ERROR;
598 
599     for (i = 0; i < pktp->dbg_cbcnt; i++) {
600         ASSERT(pktp->dbg_cbs[i].cb);
601         pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg);
602     }
603 
604     /* protect shared resource */
605     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
606         return BCME_ERROR;
607 
608     return 0;
609 }
610 
611 int
pktpool_dbg_dump(pktpool_t * pktp)612 pktpool_dbg_dump(pktpool_t *pktp)
613 {
614     int i;
615 
616     /* protect shared resource */
617     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
618         return BCME_ERROR;
619 
620     printf("pool len=%d maxlen=%d\n",  pktp->dbg_qlen, pktp->maxlen);
621     for (i = 0; i < pktp->dbg_qlen; i++) {
622         ASSERT(pktp->dbg_q[i].p);
623         printf("%d, p: 0x%x dur:%lu us state:%d\n", i,
624             pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p));
625     }
626 
627     /* protect shared resource */
628     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
629         return BCME_ERROR;
630 
631     return 0;
632 }
633 
634 int
pktpool_stats_dump(pktpool_t * pktp,pktpool_stats_t * stats)635 pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats)
636 {
637     int i;
638     int state;
639 
640     /* protect shared resource */
641     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
642         return BCME_ERROR;
643 
644     bzero(stats, sizeof(pktpool_stats_t));
645     for (i = 0; i < pktp->dbg_qlen; i++) {
646         ASSERT(pktp->dbg_q[i].p != NULL);
647 
648         state = PKTPOOLSTATE(pktp->dbg_q[i].p);
649         switch (state) {
650             case POOL_TXENQ:
651                 stats->enq++; break;
652             case POOL_TXDH:
653                 stats->txdh++; break;
654             case POOL_TXD11:
655                 stats->txd11++; break;
656             case POOL_RXDH:
657                 stats->rxdh++; break;
658             case POOL_RXD11:
659                 stats->rxd11++; break;
660             case POOL_RXFILL:
661                 stats->rxfill++; break;
662             case POOL_IDLE:
663                 stats->idle++; break;
664         }
665     }
666 
667     /* protect shared resource */
668     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
669         return BCME_ERROR;
670 
671     return 0;
672 }
673 
674 int
pktpool_start_trigger(pktpool_t * pktp,void * p)675 pktpool_start_trigger(pktpool_t *pktp, void *p)
676 {
677     uint32 cycles, i;
678 
679     /* protect shared resource */
680     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
681         return BCME_ERROR;
682 
683     if (!PKTPOOL(OSH_NULL, p))
684         goto done;
685 
686     OSL_GETCYCLES(cycles);
687 
688     for (i = 0; i < pktp->dbg_qlen; i++) {
689         ASSERT(pktp->dbg_q[i].p != NULL);
690 
691         if (pktp->dbg_q[i].p == p) {
692             pktp->dbg_q[i].cycles = cycles;
693             break;
694         }
695     }
696 
697 done:
698     /* protect shared resource */
699     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
700         return BCME_ERROR;
701 
702     return 0;
703 }
704 
705 int pktpool_stop_trigger(pktpool_t *pktp, void *p);
706 
707 int
pktpool_stop_trigger(pktpool_t * pktp,void * p)708 pktpool_stop_trigger(pktpool_t *pktp, void *p)
709 {
710     uint32 cycles, i;
711 
712     /* protect shared resource */
713     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
714         return BCME_ERROR;
715 
716     if (!PKTPOOL(OSH_NULL, p))
717         goto done;
718 
719     OSL_GETCYCLES(cycles);
720 
721     for (i = 0; i < pktp->dbg_qlen; i++) {
722         ASSERT(pktp->dbg_q[i].p != NULL);
723 
724         if (pktp->dbg_q[i].p == p) {
725             if (pktp->dbg_q[i].cycles == 0)
726                 break;
727 
728             if (cycles >= pktp->dbg_q[i].cycles)
729                 pktp->dbg_q[i].dur = cycles - pktp->dbg_q[i].cycles;
730             else
731                 pktp->dbg_q[i].dur =
732                     (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1;
733 
734             pktp->dbg_q[i].cycles = 0;
735             break;
736         }
737     }
738 
739 done:
740     /* protect shared resource */
741     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
742         return BCME_ERROR;
743 
744     return 0;
745 }
746 #endif /* BCMDBG_POOL */
747 
748 int
pktpool_avail_notify_normal(osl_t * osh,pktpool_t * pktp)749 pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp)
750 {
751     BCM_REFERENCE(osh);
752     ASSERT(pktp);
753 
754     /* protect shared resource */
755     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
756         return BCME_ERROR;
757 
758     pktp->availcb_excl = NULL;
759 
760     /* protect shared resource */
761     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
762         return BCME_ERROR;
763 
764     return 0;
765 }
766 
767 int
pktpool_avail_notify_exclusive(osl_t * osh,pktpool_t * pktp,pktpool_cb_t cb)768 pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb)
769 {
770     int i;
771     int err;
772     BCM_REFERENCE(osh);
773 
774     ASSERT(pktp);
775 
776     /* protect shared resource */
777     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
778         return BCME_ERROR;
779 
780     ASSERT(pktp->availcb_excl == NULL);
781     for (i = 0; i < pktp->cbcnt; i++) {
782         if (cb == pktp->cbs[i].cb) {
783             pktp->availcb_excl = &pktp->cbs[i];
784             break;
785         }
786     }
787 
788     if (pktp->availcb_excl == NULL)
789         err = BCME_ERROR;
790     else
791         err = 0;
792 
793     /* protect shared resource */
794     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
795         return BCME_ERROR;
796 
797     return err;
798 }
799 
800 static void
pktpool_avail_notify(pktpool_t * pktp)801 pktpool_avail_notify(pktpool_t *pktp)
802 {
803     int i, k, idx;
804     int avail;
805 
806     ASSERT(pktp);
807     if (pktp->availcb_excl != NULL) {
808         pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg);
809         return;
810     }
811 
812     k = pktp->cbcnt - 1;
813     for (i = 0; i < pktp->cbcnt; i++) {
814         avail = pktp->avail;
815 
816         if (avail) {
817             if (pktp->cbtoggle)
818                 idx = i;
819             else
820                 idx = k--;
821 
822             ASSERT(pktp->cbs[idx].cb != NULL);
823             pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg);
824         }
825     }
826 
827     /* Alternate between filling from head or tail
828      */
829     pktp->cbtoggle ^= 1;
830 
831     return;
832 }
833 
834 /** Gets an empty packet from the caller provided pool */
835 void *
pktpool_get(pktpool_t * pktp)836 pktpool_get(pktpool_t *pktp)
837 {
838     void *p;
839 
840     /* protect shared resource */
841     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
842         return NULL;
843 
844 
845     p = pktpool_deq(pktp);
846 
847     if (p == NULL) {
848         /* Notify and try to reclaim tx pkts */
849         if (pktp->ecbcnt)
850             pktpool_empty_notify(pktp);
851 
852         p = pktpool_deq(pktp);
853         if (p == NULL)
854             goto done;
855     }
856 
857 
858 done:
859     /* protect shared resource */
860     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
861         return NULL;
862 
863     return p;
864 }
865 
866 void
pktpool_free(pktpool_t * pktp,void * p)867 pktpool_free(pktpool_t *pktp, void *p)
868 {
869     /* protect shared resource */
870     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
871         return;
872 
873     ASSERT(p != NULL);
874 #ifdef BCMDBG_POOL
875 #endif
876 
877     pktpool_enq(pktp, p);
878 
879     /**
880      * Feed critical DMA with freshly freed packets, to avoid DMA starvation.
881      * If any avail callback functions are registered, send a notification
882      * that a new packet is available in the pool.
883      */
884     if (pktp->cbcnt) {
885         /* To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
886          * This allows to feed on burst basis as opposed to inefficient per-packet basis.
887          */
888         if (pktp->emptycb_disable == EMPTYCB_ENABLED) {
889             /**
890              * If the call originated from pktpool_empty_notify, the just freed packet
891              * is needed in pktpool_get.
892              * Therefore don't call pktpool_avail_notify.
893              */
894             if (pktp->empty == FALSE)
895                 pktpool_avail_notify(pktp);
896         } else {
897             /**
898              * The callback is temporarily disabled, log that a packet has been freed.
899              */
900             pktp->emptycb_disable = EMPTYCB_SKIPPED;
901         }
902     }
903 
904     /* protect shared resource */
905     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
906         return;
907 }
908 
909 /** Adds a caller provided (empty) packet to the caller provided pool */
910 int
pktpool_add(pktpool_t * pktp,void * p)911 pktpool_add(pktpool_t *pktp, void *p)
912 {
913     int err = 0;
914 
915     /* protect shared resource */
916     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
917         return BCME_ERROR;
918 
919     ASSERT(p != NULL);
920 
921     if (pktp->len == pktp->maxlen) {
922         err = BCME_RANGE;
923         goto done;
924     }
925 
926     /* pkts in pool have same length */
927     ASSERT(pktp->plen == PKTLEN(OSH_NULL, p));
928     PKTSETPOOL(OSH_NULL, p, TRUE, pktp);
929 
930     pktp->len++;
931     pktpool_enq(pktp, p);
932 
933 #ifdef BCMDBG_POOL
934     pktp->dbg_q[pktp->dbg_qlen++].p = p;
935 #endif
936 
937 done:
938     /* protect shared resource */
939     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
940         return BCME_ERROR;
941 
942     return err;
943 }
944 
945 /**
946  * Force pktpool_setmaxlen () into RAM as it uses a constant
947  * (PKTPOOL_LEN_MAX) that may be changed post tapeout for ROM-based chips.
948  */
949 int
BCMRAMFN(pktpool_setmaxlen)950 BCMRAMFN(pktpool_setmaxlen)(pktpool_t *pktp, uint16 maxlen)
951 {
952     /* protect shared resource */
953     if (HND_PKTPOOL_MUTEX_ACQUIRE(&pktp->mutex, OSL_EXT_TIME_FOREVER) != OSL_EXT_SUCCESS)
954         return BCME_ERROR;
955 
956     if (maxlen > PKTPOOL_LEN_MAX)
957         maxlen = PKTPOOL_LEN_MAX;
958 
959     /* if pool is already beyond maxlen, then just cap it
960      * since we currently do not reduce the pool len
961      * already allocated
962      */
963     pktp->maxlen = (pktp->len > maxlen) ? pktp->len : maxlen;
964 
965     /* protect shared resource */
966     if (HND_PKTPOOL_MUTEX_RELEASE(&pktp->mutex) != OSL_EXT_SUCCESS)
967         return BCME_ERROR;
968 
969     return pktp->maxlen;
970 }
971 
972 void
pktpool_emptycb_disable(pktpool_t * pktp,bool disable)973 pktpool_emptycb_disable(pktpool_t *pktp, bool disable)
974 {
975     ASSERT(pktp);
976 
977     /**
978      * To more efficiently use the cpu cycles, callbacks can be temporarily disabled.
979      * If callback is going to be re-enabled, check if any packet got
980      * freed and added back to the pool while callback was disabled.
981      * When this is the case do the callback now, provided that callback functions
982      * are registered and this call did not originate from pktpool_empty_notify.
983      */
984     if ((!disable) && (pktp->cbcnt) && (pktp->empty == FALSE) &&
985         (pktp->emptycb_disable == EMPTYCB_SKIPPED)) {
986             pktpool_avail_notify(pktp);
987     }
988 
989     /* Enable or temporarily disable callback when packet becomes available. */
990     pktp->emptycb_disable = disable ? EMPTYCB_DISABLED : EMPTYCB_ENABLED;
991 }
992 
993 bool
pktpool_emptycb_disabled(pktpool_t * pktp)994 pktpool_emptycb_disabled(pktpool_t *pktp)
995 {
996     ASSERT(pktp);
997     return pktp->emptycb_disable != EMPTYCB_ENABLED;
998 }
999 
#ifdef BCMPKTPOOL
#include <hnd_lbuf.h>

/* System-wide shared pool of basic lbufs */
pktpool_t *pktpool_shared = NULL;

#ifdef BCMFRAGPOOL
/* Shared pool of tx fragments */
pktpool_t *pktpool_shared_lfrag = NULL;
#endif /* BCMFRAGPOOL */

/* Shared pool of rx fragments */
pktpool_t *pktpool_shared_rxlfrag = NULL;

/* OSL handle captured for later pool refills */
static osl_t *pktpool_osh = NULL;
1012 
/**
 * Initializes several packet pools and allocates packets within those pools.
 *
 * Creates the packet pool registry, then allocates and initializes the shared
 * basic pool and (configuration permitting) the tx-frag and rx-frag pools.
 * Each pool is seeded with a single packet (n = 1) and capped at its final
 * maxlen; the remaining packets are filled in later via hnd_pktpool_refill()
 * once post-reclaim memory is available.
 *
 * @param osh  OS handle used for all allocations; saved in pktpool_osh for
 *             later fill/refill calls.
 * @return BCME_OK on success; BCME_ERROR if the registry could not be built,
 *         BCME_NOMEM if a pool structure could not be allocated, or the error
 *         from pktpool_init() on pool setup failure. On failure, everything
 *         allocated so far is unwound via the goto-label cascade below.
 */
int
hnd_pktpool_init(osl_t *osh)
{
	int err;
	int n;

	/* Construct a packet pool registry before initializing packet pools */
	n = pktpool_attach(osh, PKTPOOL_MAXIMUM_ID);
	if (n != PKTPOOL_MAXIMUM_ID) {
		ASSERT(0);
		err = BCME_ERROR;
		goto error0;
	}

	/* Zeroed allocation of the pool control structures. */
	pktpool_shared = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error1;
	}

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	pktpool_shared_lfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_lfrag == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error2;
	}
#endif

#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	pktpool_shared_rxlfrag = MALLOCZ(osh, sizeof(pktpool_t));
	if (pktpool_shared_rxlfrag == NULL) {
		ASSERT(0);
		err = BCME_NOMEM;
		goto error3;
	}
#endif


	/*
	 * At this early stage, there's not enough memory to allocate all
	 * requested pkts in the shared pool.  Need to add to the pool
	 * after reclaim
	 *
	 * n = NRXBUFPOST + SDPCMD_RXBUFS;
	 *
	 * Initialization of packet pools may fail (BCME_ERROR), if the packet pool
	 * registry is not initialized or the registry is depleted.
	 *
	 * A BCME_NOMEM error only indicates that the requested number of packets
	 * were not filled into the pool.
	 */
	n = 1;
	if ((err = pktpool_init(osh, pktpool_shared,
	                        &n, PKTBUFSZ, FALSE, lbuf_basic)) != BCME_OK) {
		ASSERT(0);
		goto error4;
	}
	pktpool_setmaxlen(pktpool_shared, SHARED_POOL_LEN);

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	n = 1;
	if ((err = pktpool_init(osh, pktpool_shared_lfrag,
	                        &n, PKTFRAGSZ, TRUE, lbuf_frag)) != BCME_OK) {
		ASSERT(0);
		goto error5;
	}
	pktpool_setmaxlen(pktpool_shared_lfrag, SHARED_FRAG_POOL_LEN);
#endif
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	n = 1;
	if ((err = pktpool_init(osh, pktpool_shared_rxlfrag,
	                        &n, PKTRXFRAGSZ, TRUE, lbuf_rxfrag)) != BCME_OK) {
		ASSERT(0);
		goto error6;
	}
	pktpool_setmaxlen(pktpool_shared_rxlfrag, SHARED_RXFRAG_POOL_LEN);
#endif

	/* Remember the OS handle for hnd_pktpool_fill()/hnd_pktpool_refill(). */
	pktpool_osh = osh;

	return BCME_OK;

	/*
	 * Error unwind: each label undoes the steps completed before the failure,
	 * falling through to the next label. The #if guards mirror the setup
	 * path above so that only what was actually initialized is torn down.
	 */
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
error6:
#endif

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	pktpool_deinit(osh, pktpool_shared_lfrag);
error5:
#endif

#if (defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)) || \
	(defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED))
	pktpool_deinit(osh, pktpool_shared);
#endif

error4:
	/* NOTE(review): pool structs were allocated with MALLOCZ(osh, ...) but are
	 * released with hnd_free() — presumably equivalent on this platform;
	 * confirm against the osl implementation.
	 */
#if defined(BCMRXFRAGPOOL) && !defined(BCMRXFRAGPOOL_DISABLED)
	hnd_free(pktpool_shared_rxlfrag);
	pktpool_shared_rxlfrag = (pktpool_t *)NULL;
error3:
#endif /* BCMRXFRAGPOOL */

#if defined(BCMFRAGPOOL) && !defined(BCMFRAGPOOL_DISABLED)
	hnd_free(pktpool_shared_lfrag);
	pktpool_shared_lfrag = (pktpool_t *)NULL;
error2:
#endif /* BCMFRAGPOOL */

	hnd_free(pktpool_shared);
	pktpool_shared = (pktpool_t *)NULL;

error1:
	pktpool_dettach(osh);
error0:
	return err;
} /* hnd_pktpool_init */
1135 
1136 int
hnd_pktpool_fill(pktpool_t * pktpool,bool minimal)1137 hnd_pktpool_fill(pktpool_t *pktpool, bool minimal)
1138 {
1139     return (pktpool_fill(pktpool_osh, pktpool, minimal));
1140 }
1141 
/** refills pktpools after reclaim */
void
hnd_pktpool_refill(bool minimal)
{
	/* Top up the shared basic pool first. */
	if (POOL_ENAB(pktpool_shared)) {
		pktpool_fill(pktpool_osh, pktpool_shared, minimal);
	}
/* fragpool reclaim */
#ifdef BCMFRAGPOOL
	if (POOL_ENAB(pktpool_shared_lfrag)) {
#if defined(SRMEM)
		if (SRMEM_ENAB()) {
			/* With SR memory enabled, first try to top up the shared basic
			 * pool from SR memory (PKTSRGET) before filling the frag pool
			 * from the regular heap.
			 */
			int maxlen = pktpool_maxlen(pktpool_shared);
			int len = pktpool_len(pktpool_shared);

			for (; len < maxlen; len++)	{
				void *p;
				/* Stop as soon as SR memory runs out of buffers. */
				if ((p = PKTSRGET(pktpool_plen(pktpool_shared))) == NULL)
					break;
				pktpool_add(pktpool_shared, p);
			}
		}
#endif /* SRMEM */
		pktpool_fill(pktpool_osh, pktpool_shared_lfrag, minimal);
	}
#endif /* BCMFRAGPOOL */
/* rx fragpool reclaim */
#ifdef BCMRXFRAGPOOL
	if (POOL_ENAB(pktpool_shared_rxlfrag)) {
		pktpool_fill(pktpool_osh, pktpool_shared_rxlfrag, minimal);
	}
#endif
}
1175 #endif /* BCMPKTPOOL */
1176