1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Linux Packet (skb) interface
4 *
5 * Copyright (C) 1999-2019, Broadcom.
6 *
7 * Unless you and Broadcom execute a separate written software license
8 * agreement governing use of this software, this software is licensed to you
9 * under the terms of the GNU General Public License version 2 (the "GPL"),
10 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
11 * following added to such license:
12 *
13 * As a special exception, the copyright holders of this software give you
14 * permission to link this software with independent modules, and to copy and
15 * distribute the resulting executable under terms of your choice, provided that
16 * you also meet, for each linked independent module, the terms and conditions of
17 * the license of that module. An independent module is a module which is not
18 * derived from this software. The special exception does not apply to any
19 * modifications of the software.
20 *
21 * Notwithstanding the above, under no circumstances may you combine this
22 * software in any way with any other Broadcom software provided under a license
23 * other than the GPL, without Broadcom's express prior written consent.
24 *
25 *
26 * <<Broadcom-WL-IPTag/Open:>>
27 *
28 * $Id: linux_pkt.c 769682 2018-06-27 07:29:55Z $
29 */
30
31 #include <typedefs.h>
32 #include <bcmendian.h>
33 #include <linuxver.h>
34 #include <bcmdefs.h>
35
36 #include <linux/random.h>
37
38 #include <osl.h>
39 #include <bcmutils.h>
40 #include <pcicfg.h>
41 #include <dngl_stats.h>
42 #include <dhd.h>
43
44 #include <linux/fs.h>
45 #include "linux_osl_priv.h"
46
#ifdef CONFIG_DHD_USE_STATIC_BUF

/* Singleton handles to the platform-preallocated buffer region and skb pool.
 * NULL until osl_static_mem_init() resolves them via wifi_platform_prealloc().
 */
bcm_static_buf_t *bcm_static_buf = 0;
bcm_static_pkt_t *bcm_static_skb = 0;

/* Provided by the platform layer; returns the preallocated region for 'section'. */
void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */
54
#ifdef BCM_OBJECT_TRACE
/* Zero the OSL packet tag stored in skb->cb, preserving the first 4 bytes
 * which hold the packet serial number used by the object tracer.
 * don't clear the first 4 byte that is the pkt sn
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	uint tagsz = sizeof(s->cb); \
	ASSERT(OSL_PKTTAG_SZ <= tagsz); \
	memset(s->cb + 4, 0, tagsz - 4); \
} while (0)
#else
/* Zero the whole OSL packet tag stored in skb->cb; the IP stack expects
 * skb->cb to be clean when the packet is handed up.
 */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	uint tagsz = sizeof(s->cb); \
	ASSERT(OSL_PKTTAG_SZ <= tagsz); \
	memset(s->cb, 0, tagsz); \
} while (0)
#endif /* BCM_OBJECT_TRACE */
73
/* One-time setup of the platform-preallocated ("static") memory pools.
 *
 * Resolves the DHD_PREALLOC_OSL_BUF region for general buffer allocations
 * and, in BCMSDIO / DHD_USE_STATIC_CTRLBUF builds, the DHD_PREALLOC_SKB_BUF
 * region holding the preallocated sk_buff pointer table.
 *
 * Returns 0 on success (or when nothing needs doing), -ENOMEM when a
 * required prealloc region could not be obtained from the platform layer.
 */
int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			DHD_PREALLOC_OSL_BUF, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("can not alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		} else {
			printk("succeed to alloc static buf\n");
		}

		spin_lock_init(&bcm_static_buf->static_lock);

		/* user buffers start after the control header area */
		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		/* skb bookkeeping lives 2048 bytes into the static buf region.
		 * NOTE(review): assumes bcm_static_buf was set up above — confirm
		 * no path reaches here with bcm_static_buf == NULL.
		 */
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		/* copy the preallocated skb pointer table, then mark every slot free */
		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;
		}

#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
#else
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
	}
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}
125
/* Drop this module's references to the platform-preallocated buffers.
 *
 * The underlying memory is owned by the platform layer (it came from
 * wifi_platform_prealloc()), so nothing is freed here — only the cached
 * pointers are cleared. 'osh' and 'adapter' are unused but kept for
 * interface symmetry with osl_static_mem_init().
 *
 * Always returns 0.
 */
int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	/* unconditional NULL assignment; the previous 'if (ptr)' guards were
	 * redundant, and NULL is the idiomatic null pointer constant
	 */
	bcm_static_buf = NULL;
#ifdef BCMSDIO
	bcm_static_skb = NULL;
#endif /* BCMSDIO */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	return 0;
}
140
141 /*
142 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
143 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
144 * explicitly managed from a coherency perspective.
145 */
static inline void BCMFASTPATH
osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
{
	/* Intentionally empty: explicit fwder-buffer coherency management is only
	 * required on platforms using non-ACP DDR aliasing (see comment above);
	 * this build stubs it out.
	 */
}
150
/* Allocate a bare sk_buff of 'len' bytes.
 *
 * On kernels >= 2.6.25 the GFP flags are chosen from context: GFP_ATOMIC in
 * atomic/irq-disabled context, GFP_KERNEL otherwise (or always GFP_ATOMIC
 * when DHD_USE_ATOMIC_PKTGET is set). Older kernels use dev_alloc_skb(),
 * which does not take flags. Returns NULL on allocation failure.
 */
static struct sk_buff * BCMFASTPATH
osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#ifdef DHD_USE_ATOMIC_PKTGET
	flags = GFP_ATOMIC;
#endif /* DHD_USE_ATOMIC_PKTGET */
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */

	return skb;
}
167
168 /* Convert a driver packet to native(OS) packet
169 * In the process, packettag is zeroed out before sending up
170 * IP code depends on skb->cb to be setup correctly with various options
171 * In our case, that means it should be 0
172 */
173 struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t * osh,void * pkt)174 osl_pkt_tonative(osl_t *osh, void *pkt)
175 {
176 struct sk_buff *nskb;
177
178 if (osh->pub.pkttag)
179 OSL_PKTTAG_CLEAR(pkt);
180
181 /* Decrement the packet counter */
182 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
183 atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
184
185 }
186 return (struct sk_buff *)pkt;
187 }
188
189 /* Convert a native(OS) packet to driver packet.
190 * In the process, native packet is destroyed, there is no copying
191 * Also, a packettag is zeroed out
192 */
193 void * BCMFASTPATH
osl_pkt_frmnative(osl_t * osh,void * pkt)194 osl_pkt_frmnative(osl_t *osh, void *pkt)
195 {
196 struct sk_buff *cskb;
197 struct sk_buff *nskb;
198 unsigned long pktalloced = 0;
199
200 if (osh->pub.pkttag)
201 OSL_PKTTAG_CLEAR(pkt);
202
203 /* walk the PKTCLINK() list */
204 for (cskb = (struct sk_buff *)pkt;
205 cskb != NULL;
206 cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {
207
208 /* walk the pkt buffer list */
209 for (nskb = cskb; nskb; nskb = nskb->next) {
210
211 /* Increment the packet counter */
212 pktalloced++;
213
214 /* clean the 'prev' pointer
215 * Kernel 3.18 is leaving skb->prev pointer set to skb
216 * to indicate a non-fragmented skb
217 */
218 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
219 nskb->prev = NULL;
220 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
221
222 }
223 }
224
225 /* Increment the packet counter */
226 atomic_add(pktalloced, &osh->cmn->pktalloced);
227
228 return (void *)pkt;
229 }
230
/* Return a new packet of 'len' bytes with priority 0.
 *
 * tail/len are advanced by 'len' up front so the caller sees a fully
 * "filled" buffer. Increments the OSL allocation counter on success.
 * Returns NULL on allocation failure, or when the link-memory-test hook
 * randomly forces a failure.
 */
void * BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
linux_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb;
	uchar num = 0;
	/* low-memory-test hook: fail roughly lmtest percent of allocations
	 * at random to exercise out-of-memory handling paths
	 */
	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))
			return NULL;
	}

	if ((skb = osl_alloc_skb(osh, len))) {
		/* claim the whole requested length immediately */
		skb->tail += len;
		skb->len += len;
		skb->priority = 0;

		atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */
	}

	return ((void*) skb);
}
260
/* Free a packet chain; safe for interrupt context.
 *
 * Invokes the registered tx/rx completion callback first, then releases
 * every skb on the skb->next chain and decrements the allocation counter.
 * Unlike linux_pktfree(), this variant also routes through
 * dev_kfree_skb_any() whenever IRQs are disabled, not only when a
 * destructor is present.
 */
void BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktfree_irq(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
linux_pktfree_irq(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	/* notify the registered completion hook (tx or rx) before freeing */
	if (send) {
		if (osh->pub.tx_fn) {
			osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
		}
	} else {
		if (osh->pub.rx_fn) {
			osh->pub.rx_fn(osh->pub.rx_ctx, p);
		}
	}

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	/* static-pool packets must go through osl_pktfree_static(); refuse
	 * loudly (mac_len doubles as the pool marker)
	 */
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in used\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

		{
			if (skb->destructor || irqs_disabled()) {
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			} else {
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
			}
		}
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}
328
/* Free the driver packet (chain). Free the tag if present.
 *
 * Invokes the registered tx/rx completion callback first, then releases
 * every skb on the skb->next chain and decrements the allocation counter.
 * Uses dev_kfree_skb_any() only when the skb has a destructor; see
 * linux_pktfree_irq() for the IRQ-safe variant.
 */
void BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
linux_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	/* notify the registered completion hook (tx or rx) before freeing */
	if (send) {
		if (osh->pub.tx_fn) {
			osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
		}
	} else {
		if (osh->pub.rx_fn) {
			osh->pub.rx_fn(osh->pub.rx_ctx, p);
		}
	}

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	/* static-pool packets must go through osl_pktfree_static(); refuse
	 * loudly (mac_len doubles as the pool marker)
	 */
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in used\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

		{
			if (skb->destructor) {
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			} else {
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
			}
		}
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}
397
398 #ifdef CONFIG_DHD_USE_STATIC_BUF
/* Allocate a packet from the preallocated ("static") skb pool.
 *
 * Falls back to linux_pktget() when no pool exists or the request exceeds
 * DHD_SKB_MAX_BUFSIZE. Pool occupancy is tracked in pkt_use[]; the returned
 * skb has its data/tail/len re-initialized for the requested length.
 *
 * DHD_USE_STATIC_CTRLBUF builds use a round-robin spin-locked 2-page pool
 * and return NULL on exhaustion (no dynamic fallback for control buffers);
 * other builds search 1-page, then 2-page, then (optionally) a 16k slot
 * under a semaphore, falling back to linux_pktget() when all are in use.
 */
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!bcm_static_skb)
		return linux_pktget(osh, len);

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return linux_pktget(osh, len);
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		uint32 index;
		/* round-robin scan starting after the last slot handed out */
		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
			bcm_static_skb->last_allocated_index++;
			if (bcm_static_skb->skb_8k[index] &&
				bcm_static_skb->pkt_use[index] == 0) {
				break;
			}
		}

		if (i < STATIC_PKT_2PAGE_NUM) {
			bcm_static_skb->pkt_use[index] = 1;
			skb = bcm_static_skb->skb_8k[index];
			/* re-initialize the recycled skb as if freshly allocated,
			 * leaving the default headroom before the payload
			 */
			skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
#else
			skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->data += PKT_HEADROOM_DEFAULT;
			skb->cloned = 0;
			skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;
			/* mac_len doubles as the in-use marker for pool packets */
			skb->mac_len = PREALLOC_USED_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return skb;
		}
	}

	/* ctrl-buf pool exhausted: no dynamic fallback for control buffers */
	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return NULL;
#else
	down(&bcm_static_skb->osl_pkt_sem);

	/* try the 1-page pool first for small requests */
	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
			if (bcm_static_skb->skb_4k[i] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if (i != STATIC_PKT_1PAGE_NUM) {
			bcm_static_skb->pkt_use[i] = 1;

			skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	/* then the 2-page pool; pkt_use[] indices continue past the 1-page slots,
	 * so skb_8k[] is indexed with the offset subtracted
	 */
	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	/* single oversized 16k slot, tracked in the last pkt_use[] entry */
	if (bcm_static_skb->skb_16k &&
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;

		skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb_set_tail_pointer(skb, len);
#else
		skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
		skb->len = len;

		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif /* ENHANCED_STATIC_BUF */

	/* pool exhausted: fall back to a dynamic allocation */
	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return linux_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
530
/* Return a packet to the preallocated ("static") skb pool.
 *
 * Looks the skb up in the pool tables and clears its pkt_use[] slot. When
 * there is no pool, or (in non-CTRLBUF builds) the packet is not found in
 * any pool, the packet is handed to linux_pktfree() instead. In
 * DHD_USE_STATIC_CTRLBUF builds, double frees are detected via pkt_use[]
 * and the mac_len magic marker and logged rather than acted on.
 */
void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb = (struct sk_buff *)p;
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!p) {
		return;
	}

	if (!bcm_static_skb) {
		linux_pktfree(osh, p, send);
		return;
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			/* already-free slot means a double free; log it */
			if (bcm_static_skb->pkt_use[i] == 0) {
				printk("%s: static pkt idx %d(%p) is double free\n",
					__FUNCTION__, i, p);
			} else {
				bcm_static_skb->pkt_use[i] = 0;
			}

			/* cross-check the mac_len in-use marker as well */
			if (skb->mac_len != PREALLOC_USED_MAGIC) {
				printk("%s: static pkt idx %d(%p) is not in used\n",
					__FUNCTION__, i, p);
			}

			skb->mac_len = PREALLOC_FREE_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
	down(&bcm_static_skb->osl_pkt_sem);
	/* 1-page pool */
	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	/* 2-page pool; pkt_use[] indices continue past the 1-page slots */
	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	/* single 16k slot, tracked in the last pkt_use[] entry */
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif // endif
	up(&bcm_static_skb->osl_pkt_sem);
	/* not a pool packet after all: free it dynamically */
	linux_pktfree(osh, p, send);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
602 #endif /* CONFIG_DHD_USE_STATIC_BUF */
603
604 /* Clone a packet.
605 * The pkttag contents are NOT cloned.
606 */
607 void *
608 #ifdef BCM_OBJECT_TRACE
osl_pktdup(osl_t * osh,void * skb,int line,const char * caller)609 osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
610 #else
611 osl_pktdup(osl_t *osh, void *skb)
612 #endif /* BCM_OBJECT_TRACE */
613 {
614 void * p;
615
616 ASSERT(!PKTISCHAINED(skb));
617
618 /* clear the CTFBUF flag if set and map the rest of the buffer
619 * before cloning.
620 */
621 PKTCTFMAP(osh, skb);
622
623 if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
624 return NULL;
625
626 /* skb_clone copies skb->cb.. we don't want that */
627 if (osh->pub.pkttag)
628 OSL_PKTTAG_CLEAR(p);
629
630 /* Increment the packet counter */
631 atomic_inc(&osh->cmn->pktalloced);
632 #ifdef BCM_OBJECT_TRACE
633 bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
634 #endif /* BCM_OBJECT_TRACE */
635
636 return (p);
637 }
638
639 /*
640 * BINOSL selects the slightly slower function-call-based binary compatible osl.
641 */
642
643 uint
osl_pktalloced(osl_t * osh)644 osl_pktalloced(osl_t *osh)
645 {
646 if (atomic_read(&osh->cmn->refcount) == 1)
647 return (atomic_read(&osh->cmn->pktalloced));
648 else
649 return 0;
650 }
651
652 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
653 #include <linux/kallsyms.h>
654 #include <net/sock.h>
/* Partially orphan a TCP skb to relax TCP Small Queues (TSQ) throttling.
 *
 * Reduces skb->truesize by a (tsq-1)/tsq fraction and subtracts the same
 * amount from the owning socket's sk_wmem_alloc, so the socket may queue
 * more data toward the WiFi device, then fully orphans the skb.
 *
 * Only acts on skbs whose destructor is the (unexported) tcp_wfree; its
 * address is resolved once via sprint_symbol() and cached in p_tcp_wfree.
 */
void
osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
{
	uint32 fraction;
	static void *p_tcp_wfree = NULL;

	if (tsq <= 0)
		return;

	/* plain sock_wfree skbs are not TSQ-throttled; nothing to do */
	if (!skb->destructor || skb->destructor == sock_wfree)
		return;

	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		sprint_symbol(sym, (unsigned long)skb->destructor);
		/* truncate at 9 = strlen("tcp_wfree"): sprint_symbol appends
		 * "+offset/size", which must be cut before the exact compare
		 */
		sym[9] = 0;
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;
		else
			return;
	}

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
		return;

	/* abstract a certain portion of skb truesize from the socket
	 * sk_wmem_alloc to allow more skb can be allocated for this
	 * socket for better cushion meeting WiFi device requirement
	 */
	fraction = skb->truesize * (tsq - 1) / tsq;
	skb->truesize -= fraction;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	/* 4.13+ wraps sk_wmem_alloc in refcount_t; adjust its raw refs field */
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
#else
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
#endif // endif
	skb_orphan(skb);
}
694 #endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
695