1 /*
2 * Linux OS Independent Layer
3 *
4 * Copyright (C) 1999-2013, Broadcom Corporation
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
23 *
24 * $Id: linux_osl.c 412994 2013-07-17 12:38:03Z $
25 */
26
27 #define LINUX_PORT
28
29 #include <typedefs.h>
30 #include <bcmendian.h>
31 #include <linuxver.h>
32 #include <bcmdefs.h>
33 #include <osl.h>
34 #include <bcmutils.h>
35 #include <linux/delay.h>
36 #include <pcicfg.h>
37
38
39
40 #include <linux/fs.h>
41
42 #define PCI_CFG_RETRY 10
43
44 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
45 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
46
47 #ifdef CONFIG_DHD_USE_STATIC_BUF
48 #define DHD_SKB_HDRSIZE 336
49 #define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
50 #define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
51 #define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
52
53 #define STATIC_BUF_MAX_NUM 16
54 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
55 #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
56
/* Control block for the preallocated heap-buffer pool used when
 * CONFIG_DHD_USE_STATIC_BUF is set. The buffers themselves start at
 * buf_ptr, carved into STATIC_BUF_SIZE slots; buf_use[] marks slots taken.
 */
typedef struct bcm_static_buf {
	struct semaphore static_sem;	/* serializes access to buf_use[] */
	unsigned char *buf_ptr;		/* base address of the buffer area */
	unsigned char buf_use[STATIC_BUF_MAX_NUM];	/* 1 = slot in use */
} bcm_static_buf_t;

static bcm_static_buf_t *bcm_static_buf = 0;	/* initialized in osl_attach() */
64
#define STATIC_PKT_MAX_NUM	8
#if defined(ENHANCED_STATIC_BUF)
#define STATIC_PKT_4PAGE_NUM	1
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_4PAGE_BUFSIZE
#else
#define STATIC_PKT_4PAGE_NUM	0
#define DHD_SKB_MAX_BUFSIZE	DHD_SKB_2PAGE_BUFSIZE
#endif /* ENHANCED_STATIC_BUF */

/* Control block for the preallocated skb pool: STATIC_PKT_MAX_NUM one-page
 * skbs, STATIC_PKT_MAX_NUM two-page skbs, and (with ENHANCED_STATIC_BUF)
 * a single four-page skb. pkt_use[] is indexed with the 4k pool first,
 * then the 8k pool, then the single 16k slot.
 */
typedef struct bcm_static_pkt {
	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
#ifdef ENHANCED_STATIC_BUF
	struct sk_buff *skb_16k;
#endif
	struct semaphore osl_pkt_sem;	/* serializes access to pkt_use[] */
	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM];
} bcm_static_pkt_t;

static bcm_static_pkt_t *bcm_static_skb = 0;	/* initialized in osl_attach() */
85 #endif /* CONFIG_DHD_USE_STATIC_BUF */
86
/* Doubly-linked record used for debug memory-allocation tracking
 * (anchored at osl_info.dbgmem_list).
 */
typedef struct bcm_mem_link {
	struct bcm_mem_link *prev;
	struct bcm_mem_link *next;
	uint size;			/* allocation size in bytes */
	int line;			/* allocating source line */
	void *osh;			/* owning OS handle */
	char file[BCM_MEM_FILENAME_LEN];	/* allocating source file (truncated) */
} bcm_mem_link_t;
95
/* Per-instance OS abstraction state; created by osl_attach(), freed by
 * osl_detach(). 'pub' is the only part visible to users of osl.h.
 */
struct osl_info {
	osl_pubinfo_t pub;		/* public portion (pkttag, mmbus, tx_fn...) */
#ifdef CTFPOOL
	ctfpool_t *ctfpool;		/* fast skb recycling pool */
#endif /* CTFPOOL */
	uint magic;			/* OS_HANDLE_MAGIC while handle is valid */
	void *pdev;			/* bus device (struct pci_dev * for PCI) */
	atomic_t malloced;		/* bytes outstanding via osl_malloc() */
	atomic_t pktalloced;	/* Number of allocated packet buffers */
	uint failed;			/* count of failed osl_malloc() calls */
	uint bustype;			/* PCI_BUS, SDIO_BUS, ... */
	bcm_mem_link_t *dbgmem_list;	/* debug allocation tracking list */
	spinlock_t dbgmem_lock;
#ifdef BCMDBG_CTRACE
	spinlock_t ctrace_lock;		/* protects ctrace_list/ctrace_num */
	struct list_head ctrace_list;
	int ctrace_num;
#endif /* BCMDBG_CTRACE */
	spinlock_t pktalloc_lock;
};
116
/* Zero the 32-byte skb control buffer (the pkttag area) with eight
 * unrolled 32-bit stores; the ASSERT pins the size assumption.
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
126
127 /* PCMCIA attribute space access macros */
128
/* Global ASSERT type flag */
uint32 g_assert_type = FALSE;

/* Maps negated BCME_* error codes (bcmutils.h) to Linux errno values.
 * Indexed by -bcmerror in osl_error(); osl_attach() ASSERTs that the
 * table has exactly ABS(BCME_LAST)+1 entries, so the two must stay in sync.
 */
static int16 linuxbcmerrormap[] =
{	0,			/* 0 */
	-EINVAL,		/* BCME_ERROR */
	-EINVAL,		/* BCME_BADARG */
	-EINVAL,		/* BCME_BADOPTION */
	-EINVAL,		/* BCME_NOTUP */
	-EINVAL,		/* BCME_NOTDOWN */
	-EINVAL,		/* BCME_NOTAP */
	-EINVAL,		/* BCME_NOTSTA */
	-EINVAL,		/* BCME_BADKEYIDX */
	-EINVAL,		/* BCME_RADIOOFF */
	-EINVAL,		/* BCME_NOTBANDLOCKED */
	-EINVAL,		/* BCME_NOCLK */
	-EINVAL,		/* BCME_BADRATESET */
	-EINVAL,		/* BCME_BADBAND */
	-E2BIG,			/* BCME_BUFTOOSHORT */
	-E2BIG,			/* BCME_BUFTOOLONG */
	-EBUSY,			/* BCME_BUSY */
	-EINVAL,		/* BCME_NOTASSOCIATED */
	-EINVAL,		/* BCME_BADSSIDLEN */
	-EINVAL,		/* BCME_OUTOFRANGECHAN */
	-EINVAL,		/* BCME_BADCHAN */
	-EFAULT,		/* BCME_BADADDR */
	-ENOMEM,		/* BCME_NORESOURCE */
	-EOPNOTSUPP,		/* BCME_UNSUPPORTED */
	-EMSGSIZE,		/* BCME_BADLENGTH */
	-EINVAL,		/* BCME_NOTREADY */
	-EPERM,			/* BCME_EPERM */
	-ENOMEM,		/* BCME_NOMEM */
	-EINVAL,		/* BCME_ASSOCIATED */
	-ERANGE,		/* BCME_RANGE */
	-EINVAL,		/* BCME_NOTFOUND */
	-EINVAL,		/* BCME_WME_NOT_ENABLED */
	-EINVAL,		/* BCME_TSPEC_NOTFOUND */
	-EINVAL,		/* BCME_ACM_NOTSUPPORTED */
	-EINVAL,		/* BCME_NOT_WME_ASSOCIATION */
	-EIO,			/* BCME_SDIO_ERROR */
	-ENODEV,		/* BCME_DONGLE_DOWN */
	-EINVAL,		/* BCME_VERSION */
	-EIO,			/* BCME_TXFAIL */
	-EIO,			/* BCME_RXFAIL */
	-ENODEV,		/* BCME_NODEVICE */
	-EINVAL,		/* BCME_NMODE_DISABLED */
	-ENODATA,		/* BCME_NONRESIDENT */
	-EINVAL,		/* BCME_SCANREJECT */
	-EINVAL,		/* BCME_USAGE_ERROR */
	-EIO,			/* BCME_IOCTL_ERROR */
	-EIO,			/* BCME_SERIAL_PORT_ERR */

/* When an new error code is added to bcmutils.h, add os
 * specific error translation here as well
 */
};
185
186 /* translate bcmerrors into linux errors */
187 int
osl_error(int bcmerror)188 osl_error(int bcmerror)
189 {
190 if (bcmerror > 0)
191 bcmerror = 0;
192 else if (bcmerror < BCME_LAST)
193 bcmerror = BCME_ERROR;
194
195 /* Array bounds covered by ASSERT in osl_attach */
196 return linuxbcmerrormap[-bcmerror];
197 }
198
199 extern uint8* dhd_os_prealloc(void *osh, int section, int size);
200
201 osl_t *
osl_attach(void * pdev,uint bustype,bool pkttag)202 osl_attach(void *pdev, uint bustype, bool pkttag)
203 {
204 osl_t *osh;
205 gfp_t flags;
206
207 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
208 flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
209 #else
210 flags = GFP_ATOMIC;
211 #endif
212 if (!(osh = kmalloc(sizeof(osl_t), flags)))
213 return osh;
214
215 ASSERT(osh);
216
217 bzero(osh, sizeof(osl_t));
218
219 /* Check that error map has the right number of entries in it */
220 ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
221
222 osh->magic = OS_HANDLE_MAGIC;
223 atomic_set(&osh->malloced, 0);
224 osh->failed = 0;
225 osh->dbgmem_list = NULL;
226 spin_lock_init(&(osh->dbgmem_lock));
227 osh->pdev = pdev;
228 osh->pub.pkttag = pkttag;
229 osh->bustype = bustype;
230
231 switch (bustype) {
232 case PCI_BUS:
233 case SI_BUS:
234 case PCMCIA_BUS:
235 osh->pub.mmbus = TRUE;
236 break;
237 case JTAG_BUS:
238 case SDIO_BUS:
239 case USB_BUS:
240 case SPI_BUS:
241 case RPC_BUS:
242 osh->pub.mmbus = FALSE;
243 break;
244 default:
245 ASSERT(FALSE);
246 break;
247 }
248
249 #if defined(CONFIG_DHD_USE_STATIC_BUF)
250 if (!bcm_static_buf) {
251 if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
252 STATIC_BUF_TOTAL_LEN))) {
253 printk("can not alloc static buf!\n");
254 bcm_static_skb = NULL;
255 ASSERT(osh->magic == OS_HANDLE_MAGIC);
256 kfree(osh);
257 return NULL;
258 }
259 else
260 printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
261
262
263 sema_init(&bcm_static_buf->static_sem, 1);
264
265 bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
266 }
267
268 if (!bcm_static_skb) {
269 int i;
270 void *skb_buff_ptr = 0;
271 bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
272 skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
273 if (!skb_buff_ptr) {
274 printk("cannot alloc static buf!\n");
275 bcm_static_buf = NULL;
276 bcm_static_skb = NULL;
277 ASSERT(osh->magic == OS_HANDLE_MAGIC);
278 kfree(osh);
279 return NULL;
280 }
281
282 bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
283 (STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM));
284 for (i = 0; i < STATIC_PKT_MAX_NUM * 2 + STATIC_PKT_4PAGE_NUM; i++)
285 bcm_static_skb->pkt_use[i] = 0;
286
287 sema_init(&bcm_static_skb->osl_pkt_sem, 1);
288 }
289 #endif /* CONFIG_DHD_USE_STATIC_BUF */
290
291 #ifdef BCMDBG_CTRACE
292 spin_lock_init(&osh->ctrace_lock);
293 INIT_LIST_HEAD(&osh->ctrace_list);
294 osh->ctrace_num = 0;
295 #endif /* BCMDBG_CTRACE */
296
297 spin_lock_init(&(osh->pktalloc_lock));
298
299 return osh;
300 }
301
302 void
osl_detach(osl_t * osh)303 osl_detach(osl_t *osh)
304 {
305 if (osh == NULL)
306 return;
307
308 #ifdef CONFIG_DHD_USE_STATIC_BUF
309 if (bcm_static_buf) {
310 bcm_static_buf = 0;
311 }
312 if (bcm_static_skb) {
313 bcm_static_skb = 0;
314 }
315 #endif
316
317 ASSERT(osh->magic == OS_HANDLE_MAGIC);
318 kfree(osh);
319 }
320
/* Allocate a plain skb of 'len' bytes. On 2.6.25+ kernels, use GFP_KERNEL
 * when sleeping is allowed, otherwise GFP_ATOMIC; older kernels always use
 * the atomic dev_alloc_skb() path. Returns NULL on failure.
 */
static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
	return skb;
}
333
334 #ifdef CTFPOOL
335
/* Pool locking: with CTFPOOL_SPINLOCK the IRQ-safe variants are used
 * (callers may run in hard-IRQ context); otherwise bottom-half locking
 * suffices and the 'flags' argument is ignored.
 */
#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif /* CTFPOOL_SPINLOCK */
/*
 * Allocate and add an object to packet pool.
 * Returns the new skb, or NULL if the pool is full or allocation failed.
 * Takes the pool lock across both the capacity check and the list insert.
 */
void *
osl_ctfpool_add(osl_t *osh)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Allocate a new skb and add it to the ctfpool */
	skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
	if (skb == NULL) {
		printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
		       osh->ctfpool->obj_size);
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Add to ctfpool (singly linked through skb->next) */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	return skb;
}
391
392 /*
393 * Add new objects to the pool.
394 */
395 void
osl_ctfpool_replenish(osl_t * osh,uint thresh)396 osl_ctfpool_replenish(osl_t *osh, uint thresh)
397 {
398 if ((osh == NULL) || (osh->ctfpool == NULL))
399 return;
400
401 /* Do nothing if no refills are required */
402 while ((osh->ctfpool->refills > 0) && (thresh--)) {
403 osl_ctfpool_add(osh);
404 osh->ctfpool->refills--;
405 }
406 }
407
408 /*
409 * Initialize the packet pool with specified number of objects.
410 */
411 int32
osl_ctfpool_init(osl_t * osh,uint numobj,uint size)412 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
413 {
414 gfp_t flags;
415
416 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
417 flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
418 #else
419 flags = GFP_ATOMIC;
420 #endif
421 osh->ctfpool = kzalloc(sizeof(ctfpool_t), flags);
422 ASSERT(osh->ctfpool);
423
424 osh->ctfpool->max_obj = numobj;
425 osh->ctfpool->obj_size = size;
426
427 spin_lock_init(&osh->ctfpool->lock);
428
429 while (numobj--) {
430 if (!osl_ctfpool_add(osh))
431 return -1;
432 osh->ctfpool->fast_frees--;
433 }
434
435 return 0;
436 }
437
/*
 * Cleanup the packet pool objects: free every pooled skb under the pool
 * lock, then free the pool control block itself.
 */
void
osl_ctfpool_cleanup(osl_t *osh)
{
	struct sk_buff *skb, *nskb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

	CTFPOOL_LOCK(osh->ctfpool, flags);

	skb = osh->ctfpool->head;

	/* walk the skb->next chain, saving the successor before freeing */
	while (skb != NULL) {
		nskb = skb->next;
		dev_kfree_skb(skb);
		skb = nskb;
		osh->ctfpool->curr_obj--;
	}

	ASSERT(osh->ctfpool->curr_obj == 0);
	osh->ctfpool->head = NULL;
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	kfree(osh->ctfpool);
	osh->ctfpool = NULL;
}
470
/* Print pool counters into the bcmstrbuf passed as 'b'. */
void
osl_ctfpool_stats(osl_t *osh, void *b)
{
	struct bcmstrbuf *bb;

	if ((osh == NULL) || (osh->ctfpool == NULL))
		return;

#ifdef CONFIG_DHD_USE_STATIC_BUF
	/* NOTE(review): resetting the static buffer/skb pool globals from a
	 * stats dump looks out of place — confirm this side effect is intended.
	 */
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	bb = b;

	ASSERT((osh != NULL) && (bb != NULL));

	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
	            osh->ctfpool->max_obj, osh->ctfpool->obj_size,
	            osh->ctfpool->curr_obj, osh->ctfpool->refills);
	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
	            osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
	            osh->ctfpool->slow_allocs);
}
499
/* Pop an skb from the ctfpool and re-initialize the fields this layer
 * changed while it was pooled. Returns NULL (a "slow alloc") when the pool
 * is absent or empty, letting the caller fall back to osl_alloc_skb().
 */
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Init skb struct: reset data/tail to the default headroom
	 * (NET_SKB_PAD on ARMv7-A builds, the legacy 16 bytes elsewhere)
	 */
	skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
	skb->data = skb->head + NET_SKB_PAD;
	skb->tail = skb->head + NET_SKB_PAD;
#else
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;
#endif /* __ARM_ARCH_7A__ */
	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	/* clear packet-chaining state left over from previous use */
	PKTSETCLINK(skb, NULL);
	PKTCCLRATTR(skb);

	return skb;
}
554 #endif /* CTFPOOL */
/* Convert a driver packet to native(OS) packet
 * In the process, packettag is zeroed out before sending up
 * IP code depends on skb->cb to be setup correctly with various options
 * In our case, that means it should be 0
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
	struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
	struct sk_buff *nskb1, *nskb2;
#endif

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter: a chained head counts for its
	 * whole chain (PKTCCNT), a plain skb counts as 1.
	 */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);

#ifdef BCMDBG_CTRACE
		/* drop every chain member from the ctrace debug list */
		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
			if (PKTISCHAINED(nskb1)) {
				nskb2 = PKTCLINK(nskb1);
			}
			else
				nskb2 = NULL;

			DEL_CTRACE(osh, nskb1);
		}
#endif /* BCMDBG_CTRACE */
	}
	return (struct sk_buff *)pkt;
}
589
/* Convert a native(OS) packet to driver packet.
 * In the process, native packet is destroyed, there is no copying
 * Also, a packettag is zeroed out
 */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
#else
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
#endif /* BCMDBG_CTRACE */
{
	struct sk_buff *nskb;
#ifdef BCMDBG_CTRACE
	struct sk_buff *nskb1, *nskb2;
#endif

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Increment the packet counter (mirror of osl_pkt_tonative):
	 * chained heads count PKTCCNT, plain skbs count 1.
	 */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);

#ifdef BCMDBG_CTRACE
		/* record every chain member on the ctrace debug list */
		for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
			if (PKTISCHAINED(nskb1)) {
				nskb2 = PKTCLINK(nskb1);
			}
			else
				nskb2 = NULL;

			ADD_CTRACE(osh, nskb1, file, line);
		}
#endif /* BCMDBG_CTRACE */
	}
	return (void *)pkt;
}
628
/* Return a new packet. zero out pkttag */
#ifdef BCMDBG_CTRACE
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len, int line, char *file)
#else
void * BCMFASTPATH
osl_pktget(osl_t *osh, uint len)
#endif /* BCMDBG_CTRACE */
{
	struct sk_buff *skb;

	/* NOTE: the opening brace of this if spans both #ifdef arms */
#ifdef CTFPOOL
	/* Allocate from local pool, falling back to a fresh skb */
	skb = osl_pktfastget(osh, len);
	if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
#else /* CTFPOOL */
	if ((skb = osl_alloc_skb(osh, len))) {
#endif /* CTFPOOL */
		/* present the full requested length as packet data */
		skb->tail += len;
		skb->len  += len;
		skb->priority = 0;

#ifdef BCMDBG_CTRACE
		ADD_CTRACE(osh, skb, file, line);
#endif
		atomic_inc(&osh->pktalloced);
	}

	return ((void*) skb);
}
659
#ifdef CTFPOOL
/* Return an skb to its originating ctfpool instead of freeing it.
 * Resets only the fields this layer may have changed, then pushes the
 * skb back on the pool's free list under the pool lock.
 */
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *ctfpool;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* We only need to init the fields that we change */
	skb->dev = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
	skb->dst = NULL;
#endif
	OSL_PKTTAG_CLEAR(skb);
	skb->ip_summed = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	/* drop any socket ownership/destructor before pooling */
	skb_orphan(skb);
#else
	skb->destructor = NULL;
#endif

	/* the pool pointer was stashed in the skb by osl_ctfpool_add() */
	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* Add object to the ctfpool */
	CTFPOOL_LOCK(ctfpool, flags);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;
	ctfpool->curr_obj++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	CTFPOOL_UNLOCK(ctfpool, flags);
}
#endif /* CTFPOOL */
704
/* Free the driver packet. Free the tag if present.
 * 'send' indicates a TX-path free; the registered tx_fn hook (if any)
 * is notified first. Walks the skb->next chain and frees each member,
 * preferring the ctfpool fast path for pooled skbs.
 */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	if (osh == NULL)
	{
		printk("%s: osh == NULL \n", __FUNCTION__);
		return;
	}

	skb = (struct sk_buff*) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCMDBG_CTRACE
		DEL_CTRACE(osh, skb);
#endif

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb)) {
			/* only recycle when we hold the last reference;
			 * otherwise drop our ref and skip the free
			 */
			if (atomic_read(&skb->users) == 1)
				smp_rmb();
			else if (!atomic_dec_and_test(&skb->users))
				goto next_skb;
			osl_pktfastfree(osh, skb);
		} else
#endif
		{
			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}
#ifdef CTFPOOL
next_skb:
#endif
		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}
762
763 #ifdef CONFIG_DHD_USE_STATIC_BUF
764 void*
765 osl_pktget_static(osl_t *osh, uint len)
766 {
767 int i = 0;
768 struct sk_buff *skb;
769
770 if (len > DHD_SKB_MAX_BUFSIZE) {
771 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
772 return osl_pktget(osh, len);
773 }
774
775 down(&bcm_static_skb->osl_pkt_sem);
776
777 if (len <= DHD_SKB_1PAGE_BUFSIZE) {
778 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
779 if (bcm_static_skb->pkt_use[i] == 0)
780 break;
781 }
782
783 if (i != STATIC_PKT_MAX_NUM) {
784 bcm_static_skb->pkt_use[i] = 1;
785
786 skb = bcm_static_skb->skb_4k[i];
787 skb->tail = skb->data + len;
788 skb->len = len;
789
790 up(&bcm_static_skb->osl_pkt_sem);
791 return skb;
792 }
793 }
794
795 if (len <= DHD_SKB_2PAGE_BUFSIZE) {
796 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
797 if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM]
798 == 0)
799 break;
800 }
801
802 if (i != STATIC_PKT_MAX_NUM) {
803 bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
804 skb = bcm_static_skb->skb_8k[i];
805 skb->tail = skb->data + len;
806 skb->len = len;
807
808 up(&bcm_static_skb->osl_pkt_sem);
809 return skb;
810 }
811 }
812
813 #if defined(ENHANCED_STATIC_BUF)
814 if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
815 bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
816
817 skb = bcm_static_skb->skb_16k;
818 skb->tail = skb->data + len;
819 skb->len = len;
820
821 up(&bcm_static_skb->osl_pkt_sem);
822 return skb;
823 }
824 #endif
825
826 up(&bcm_static_skb->osl_pkt_sem);
827 printk("%s: all static pkt in use!\n", __FUNCTION__);
828 return osl_pktget(osh, len);
829 }
830
/* Return a packet obtained from osl_pktget_static(). If 'p' belongs to one
 * of the static pools its slot is simply marked free; otherwise the packet
 * is handed to the regular osl_pktfree() path.
 */
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
	if (!bcm_static_skb) {
		osl_pktfree(osh, p, send);
		return;
	}

	down(&bcm_static_skb->osl_pkt_sem);
	/* 1-page pool? */
	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	/* 2-page pool? (slots after the 4k pool in pkt_use[]) */
	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	/* the single 4-page skb? */
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif
	/* not ours: free through the normal path */
	up(&bcm_static_skb->osl_pkt_sem);
	osl_pktfree(osh, p, send);
}
866 #endif /* CONFIG_DHD_USE_STATIC_BUF */
867
/* Ensure 'pad' bytes of zeroed tailroom after the skb data, expanding and
 * linearizing the skb if needed. Returns 0 on success or a negative errno
 * from pskb_expand_head()/skb_linearize().
 */
int osh_pktpadtailroom(osl_t *osh, void* p, int pad)
{
	int err;
	int ntail;
	struct sk_buff* skb = (struct sk_buff*)p;

	/* additional tail bytes needed beyond the current tailroom */
	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto done;
	}

	err = skb_linearize(skb);
	if (unlikely(err))
		goto done;

	/* zero the freshly guaranteed pad area */
	memset(skb->data + skb->len, 0, pad);

done:
	return err;
}
890
891 uint32
892 osl_pci_read_config(osl_t *osh, uint offset, uint size)
893 {
894 uint val = 0;
895 uint retry = PCI_CFG_RETRY;
896
897 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
898
899 /* only 4byte access supported */
900 ASSERT(size == 4);
901
902 do {
903 pci_read_config_dword(osh->pdev, offset, &val);
904 if (val != 0xffffffff)
905 break;
906 } while (retry--);
907
908
909 return (val);
910 }
911
912 void
913 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
914 {
915 uint retry = PCI_CFG_RETRY;
916
917 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
918
919 /* only 4byte access supported */
920 ASSERT(size == 4);
921
922 do {
923 pci_write_config_dword(osh->pdev, offset, val);
924 if (offset != PCI_BAR0_WIN)
925 break;
926 if (osl_pci_read_config(osh, offset, size) == val)
927 break;
928 } while (retry--);
929
930 }
931
932 /* return bus # for the pci device pointed by osh->pdev */
933 uint
934 osl_pci_bus(osl_t *osh)
935 {
936 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
937
938 return ((struct pci_dev *)osh->pdev)->bus->number;
939 }
940
941 /* return slot # for the pci device pointed by osh->pdev */
942 uint
943 osl_pci_slot(osl_t *osh)
944 {
945 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
946
947 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
948 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
949 #else
950 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
951 #endif
952 }
953
954 /* return the pci device pointed by osh->pdev */
955 struct pci_dev *
956 osl_pci_device(osl_t *osh)
957 {
958 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
959
960 return osh->pdev;
961 }
962
/* PCMCIA attribute-space accessor: intentionally a no-op in this port. */
static void
osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
{
}
967
/* Read PCMCIA attribute space (no-op here; see osl_pcmcia_attr). */
void
osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
}
973
/* Write PCMCIA attribute space (no-op here; see osl_pcmcia_attr). */
void
osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
{
	osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
}
979
/* Allocate 'size' bytes. With CONFIG_DHD_USE_STATIC_BUF, page-to-
 * STATIC_BUF_SIZE requests are served from the preallocated pool first;
 * everything else (and pool exhaustion) falls through to kmalloc().
 * Updates osh->malloced / osh->failed accounting when osh is non-NULL.
 */
void *
osl_malloc(osl_t *osh, uint size)
{
	void *addr;
	gfp_t flags;

	/* only ASSERT if osh is defined */
	if (osh)
		ASSERT(osh->magic == OS_HANDLE_MAGIC);

#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		int i = 0;
		if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
		{
			down(&bcm_static_buf->static_sem);

			/* find a free slot */
			for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
			{
				if (bcm_static_buf->buf_use[i] == 0)
					break;
			}

			if (i == STATIC_BUF_MAX_NUM)
			{
				up(&bcm_static_buf->static_sem);
				printk("all static buff in use!\n");
				goto original;
			}

			bcm_static_buf->buf_use[i] = 1;
			up(&bcm_static_buf->static_sem);

			bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
			if (osh)
				atomic_add(size, &osh->malloced);

			return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
		}
	}
original:
#endif /* CONFIG_DHD_USE_STATIC_BUF */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25))
	/* GFP_KERNEL may sleep; only safe outside atomic/IRQ context */
	flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#else
	flags = GFP_ATOMIC;
#endif
	if ((addr = kmalloc(size, flags)) == NULL) {
		if (osh)
			osh->failed++;
		return (NULL);
	}
	if (osh)
		atomic_add(size, &osh->malloced);

	return (addr);
}
1039
/* Free memory obtained from osl_malloc(). Addresses inside the static
 * buffer region only have their pool slot cleared; everything else goes
 * to kfree(). Accounting on osh->malloced is adjusted either way.
 */
void
osl_mfree(osl_t *osh, void *addr, uint size)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf)
	{
		/* NOTE(review): range test spans the whole prealloc region
		 * starting just past the control block — confirm the boundary
		 * arithmetic matches how buf_ptr is laid out in osl_attach().
		 */
		if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
			<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		{
			int buf_idx = 0;

			/* recover the slot index from the offset */
			buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

			down(&bcm_static_buf->static_sem);
			bcm_static_buf->buf_use[buf_idx] = 0;
			up(&bcm_static_buf->static_sem);

			if (osh) {
				ASSERT(osh->magic == OS_HANDLE_MAGIC);
				atomic_sub(size, &osh->malloced);
			}
			return;
		}
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	if (osh) {
		ASSERT(osh->magic == OS_HANDLE_MAGIC);
		atomic_sub(size, &osh->malloced);
	}
	kfree(addr);
}
1071
1072 uint
1073 osl_malloced(osl_t *osh)
1074 {
1075 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1076 return (atomic_read(&osh->malloced));
1077 }
1078
1079 uint
1080 osl_malloc_failed(osl_t *osh)
1081 {
1082 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1083 return (osh->failed);
1084 }
1085
1086
1087 uint
1088 osl_dma_consistent_align(void)
1089 {
1090 return (PAGE_SIZE);
1091 }
1092
/* Allocate DMA-consistent memory.
 * align_bits: requested alignment as a power of two; if the platform
 *             default alignment doesn't satisfy it, the size is padded.
 * alloced:    out — actual number of bytes reserved.
 * pap:        out — physical (bus) address of the allocation.
 * Returns the virtual address, or NULL on failure.
 */
void*
osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
{
	void *va;
	uint16 align = (1 << align_bits);
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
		size += align;
	*alloced = size;

#ifdef __ARM_ARCH_7A__
	/* ARMv7-A path: zeroed kmalloc + virt-to-phys translation */
	va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
	if (va)
		*pap = (ulong)__virt_to_phys((ulong)va);
#else
	va = pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap);
#endif
	return va;
}
1113
/* Free memory obtained from osl_dma_alloc_consistent(); 'va', 'size' and
 * 'pa' must match the original allocation.
 */
void
osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
{
	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

#ifdef __ARM_ARCH_7A__
	/* mirrors the kmalloc path in osl_dma_alloc_consistent() */
	kfree(va);
#else
	pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
#endif
}
1125
/* Map a buffer (or, on ARMv7-A SG builds, a whole packet chain) for DMA.
 * direction selects to-device (DMA_TX) or from-device mapping. When a
 * hnddma segment map 'dmah' is supplied on the SG path, every fragment of
 * every chained packet is mapped and recorded in dmah->segs[]; the return
 * value is then the first segment's bus address. Otherwise the single
 * buffer [va, va+size) is mapped and its bus address returned.
 */
uint BCMFASTPATH
osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
{
	int dir;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
	dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;

#if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
	if (dmah != NULL) {
		int32 nsegs, i, totsegs = 0, totlen = 0;
		struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
		struct sk_buff *skb;
		for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
			sg = &_sg[totsegs];
			if (skb_is_nonlinear(skb)) {
				/* paged skb: one sg entry per fragment */
				nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
				ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
				pci_map_sg(osh->pdev, sg, nsegs, dir);
			} else {
				/* linear skb: single segment */
				nsegs = 1;
				ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
				sg->page_link = 0;
				sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
				pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
			}
			totsegs += nsegs;
			totlen += PKTLEN(osh, skb);
		}
		dmah->nsegs = totsegs;
		dmah->origsize = totlen;
		for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
			dmah->segs[i].addr = sg_phys(sg);
			dmah->segs[i].length = sg->length;
		}
		return dmah->segs[0].addr;
	}
#endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */

	return (pci_map_single(osh->pdev, va, size, dir));
}
1167
1168 void BCMFASTPATH
1169 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
1170 {
1171 int dir;
1172
1173 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1174 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1175 pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1176 }
1177
#if defined(BCMASSERT_LOG)
/* Log a failed ASSERT: the expression text, the basename of the source
 * file, and the line number.
 */
void
osl_assert(const char *exp, const char *file, int line)
{
	char tempbuf[256];
	const char *basename;

	basename = strrchr(file, '/');
	/* skip the '/' */
	if (basename)
		basename++;

	if (!basename)
		basename = file;

#ifdef BCMASSERT_LOG
	/* Fix: bound by the real buffer size — the previous hard-coded 64
	 * silently truncated long expressions/paths despite the 256-byte buffer.
	 */
	snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);
	printk("%s", tempbuf);
#endif /* BCMASSERT_LOG */
}
#endif
1202
1203 void
1204 osl_delay(uint usec)
1205 {
1206 uint d;
1207
1208 while (usec > 0) {
1209 d = MIN(usec, 1000);
1210 udelay(d);
1211 usec -= d;
1212 }
1213 }
1214
1215 void
1216 osl_sleep(uint ms)
1217 {
1218 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1219 if (ms < 20)
1220 usleep_range(ms*1000, ms*1000 + 1000);
1221 else
1222 #endif
1223 msleep(ms);
1224 }
1225
1226
/* Clone a packet.
 * The pkttag contents are NOT cloned.
 * Returns the new skb on success, NULL on allocation failure.
 */
#ifdef BCMDBG_CTRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, char *file)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCMDBG_CTRACE */
{
	void * p;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36))
	gfp_t flags;
#endif

	/* Chained packets must be duplicated by the caller one skb at a time. */
	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
	/* Only use a blocking allocation when it is safe to sleep. */
	flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	if ((p = pskb_copy((struct sk_buff *)skb, flags)) == NULL)
#else
	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
		return NULL;

#ifdef CTFPOOL
	if (PKTISFAST(osh, skb)) {
		ctfpool_t *ctfpool;

		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
		/* Both original and copy leave the fast path so neither is
		 * returned to the ctfpool on free.
		 */
		PKTCLRFAST(osh, p);
		PKTCLRFAST(osh, skb);
		ctfpool->refills++;
	}
#endif /* CTFPOOL */

	/* Clear PKTC context: the copy starts as a standalone single-packet
	 * chain of length PKTLEN.
	 */
	PKTSETCLINK(p, NULL);
	PKTCCLRFLAGS(p);
	PKTCSETCNT(p, 1);
	PKTCSETLEN(p, PKTLEN(osh, skb));

	/* skb_clone copies skb->cb.. we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->pktalloced);
#ifdef BCMDBG_CTRACE
	ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
#endif
	return (p);
}
1293
1294 #ifdef BCMDBG_CTRACE
1295 int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
1296 {
1297 unsigned long flags;
1298 struct sk_buff *skb;
1299 int ck = FALSE;
1300
1301 spin_lock_irqsave(&osh->ctrace_lock, flags);
1302
1303 list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
1304 if (pkt == skb) {
1305 ck = TRUE;
1306 break;
1307 }
1308 }
1309
1310 spin_unlock_irqrestore(&osh->ctrace_lock, flags);
1311 return ck;
1312 }
1313
1314 void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
1315 {
1316 unsigned long flags;
1317 struct sk_buff *skb;
1318 int idx = 0;
1319 int i, j;
1320
1321 spin_lock_irqsave(&osh->ctrace_lock, flags);
1322
1323 if (b != NULL)
1324 bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num);
1325 else
1326 printk(" Total %d sbk not free\n", osh->ctrace_num);
1327
1328 list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
1329 if (b != NULL)
1330 bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
1331 else
1332 printk("[%d] skb %p:\n", ++idx, skb);
1333
1334 for (i = 0; i < skb->ctrace_count; i++) {
1335 j = (skb->ctrace_start + i) % CTRACE_NUM;
1336 if (b != NULL)
1337 bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]);
1338 else
1339 printk(" [%s(%d)]\n", skb->func[j], skb->line[j]);
1340 }
1341 if (b != NULL)
1342 bcm_bprintf(b, "\n");
1343 else
1344 printk("\n");
1345 }
1346
1347 spin_unlock_irqrestore(&osh->ctrace_lock, flags);
1348
1349 return;
1350 }
1351 #endif /* BCMDBG_CTRACE */
1352
1353
1354 /*
1355 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1356 */
1357
1358 /*
1359 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1360 */
1361
1362 uint
1363 osl_pktalloced(osl_t *osh)
1364 {
1365 return (atomic_read(&osh->pktalloced));
1366 }
1367
1368 /* Linux Kernel: File Operations: start */
1369 void *
1370 osl_os_open_image(char *filename)
1371 {
1372 struct file *fp;
1373
1374 fp = filp_open(filename, O_RDONLY, 0);
1375 /*
1376 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1377 * Alternative:
1378 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1379 * ???
1380 */
1381 if (IS_ERR(fp))
1382 fp = NULL;
1383
1384 return fp;
1385 }
1386
1387 int
1388 osl_os_get_image_block(char *buf, int len, void *image)
1389 {
1390 struct file *fp = (struct file *)image;
1391 int rdlen;
1392
1393 if (!image)
1394 return 0;
1395
1396 rdlen = kernel_read(fp, fp->f_pos, buf, len);
1397 if (rdlen > 0)
1398 fp->f_pos += rdlen;
1399
1400 return rdlen;
1401 }
1402
1403 void
1404 osl_os_close_image(void *image)
1405 {
1406 if (image)
1407 filp_close((struct file *)image, NULL);
1408 }
1409
1410 int
1411 osl_os_image_size(void *image)
1412 {
1413 int len = 0, curroffset;
1414
1415 if (image) {
1416 /* store the current offset */
1417 curroffset = generic_file_llseek(image, 0, 1);
1418 /* goto end of file to get length */
1419 len = generic_file_llseek(image, 0, 2);
1420 /* restore back the offset */
1421 generic_file_llseek(image, curroffset, 0);
1422 }
1423 return len;
1424 }
1425
1426 /* Linux Kernel: File Operations: end */
1427