/*
 * Linux Packet (skb) interface
 *
 * Copyright (C) 1999-2019, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions
 * of the license of that module.  An independent module is a module which is
 * not derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *      Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_pkt.c 769682 2018-06-27 07:29:55Z $
 */

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <pcicfg.h>
#include <dngl_stats.h>
#include <dhd.h>

#include <linux/fs.h>
#include "linux_osl_priv.h"

#ifdef CONFIG_DHD_USE_STATIC_BUF

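/* Preallocated memory handed over by the platform layer via
 * wifi_platform_prealloc(): bcm_static_buf backs the DHD_PREALLOC_OSL_BUF
 * section and bcm_static_skb the DHD_PREALLOC_SKB_BUF skb pool (see
 * osl_static_mem_init() below), presumably so allocations can be served from
 * reserved memory instead of the kernel allocator at runtime.
 */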
bcm_static_buf_t *bcm_static_buf = 0;
bcm_static_pkt_t *bcm_static_skb = 0;

void *wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */

#ifdef BCM_OBJECT_TRACE
/* don't clear the first 4 bytes that hold the pkt sn */
#define OSL_PKTTAG_CLEAR(p)                                                    \
    do {                                                                       \
        struct sk_buff *s = (struct sk_buff *)(p);                             \
        uint tagsz = sizeof(s->cb);                                            \
        ASSERT(OSL_PKTTAG_SZ <= tagsz);                                        \
        memset(s->cb + 4, 0, tagsz - 4);                                       \
    } while (0)
#else
#define OSL_PKTTAG_CLEAR(p)                                                    \
    do {                                                                       \
        struct sk_buff *s = (struct sk_buff *)(p);                             \
        uint tagsz = sizeof(s->cb);                                            \
        ASSERT(OSL_PKTTAG_SZ <= tagsz);                                        \
        memset(s->cb, 0, tagsz);                                               \
    } while (0)
#endif /* BCM_OBJECT_TRACE */

int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
    if (!bcm_static_buf && adapter) {
        if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(
                  adapter, DHD_PREALLOC_OSL_BUF,
                  STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
            printk("cannot alloc static buf!\n");
            bcm_static_skb = NULL;
            ASSERT(osh->magic == OS_HANDLE_MAGIC);
            return -ENOMEM;
        } else {
            printk("succeeded to alloc static buf\n");
        }

        spin_lock_init(&bcm_static_buf->static_lock);

        bcm_static_buf->buf_ptr =
            (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
    }

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
    if (!bcm_static_skb && adapter) {
        int i;
        void *skb_buff_ptr = 0;
        bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 0x800);
        skb_buff_ptr = wifi_platform_prealloc(adapter, DHD_PREALLOC_SKB_BUF, 0);
        if (!skb_buff_ptr) {
            printk("cannot alloc static buf!\n");
            bcm_static_buf = NULL;
            bcm_static_skb = NULL;
            ASSERT(osh->magic == OS_HANDLE_MAGIC);
            return -ENOMEM;
        }

        bcopy(skb_buff_ptr, bcm_static_skb,
              sizeof(struct sk_buff *) * (STATIC_PKT_MAX_NUM));
        for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
            bcm_static_skb->pkt_use[i] = 0;
        }

#ifdef DHD_USE_STATIC_CTRLBUF
        spin_lock_init(&bcm_static_skb->osl_pkt_lock);
        bcm_static_skb->last_allocated_index = 0;
#else
        sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
    }
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

    return 0;
}

int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
    if (bcm_static_buf) {
        bcm_static_buf = 0;
    }
#ifdef BCMSDIO
    if (bcm_static_skb) {
        bcm_static_skb = 0;
    }
#endif /* BCMSDIO */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
    return 0;
}

/*
 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
 * explicitly managed from a coherency perspective.
 */
static inline void BCMFASTPATH osl_fwderbuf_reset(osl_t *osh,
                                                  struct sk_buff *skb)
{
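    /* Intentionally a no-op in this build: the forwarder-buffer coherency
     * handling described above is presumably only needed when the hardware
     * forwarder path is compiled in, so the hook is kept empty here.
     */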
}

static struct sk_buff *BCMFASTPATH osl_alloc_skb(osl_t *osh, unsigned int len)
{
    struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
    gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#ifdef DHD_USE_ATOMIC_PKTGET
    flags = GFP_ATOMIC;
#endif /* DHD_USE_ATOMIC_PKTGET */
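    /* GFP_KERNEL may sleep, which is not allowed in atomic context or with
     * IRQs disabled, so those paths (and DHD_USE_ATOMIC_PKTGET builds) fall
     * back to GFP_ATOMIC allocations.
     */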
    skb = __dev_alloc_skb(len, flags);
#else
    skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */

    return skb;
}

/* Convert a driver packet to native(OS) packet
 * In the process, packettag is zeroed out before sending up
 * IP code depends on skb->cb to be setup correctly with various options
 * In our case, that means it should be 0
 */
struct sk_buff *BCMFASTPATH osl_pkt_tonative(osl_t *osh, void *pkt)
{
    struct sk_buff *nskb;

    if (osh->pub.pkttag) {
        OSL_PKTTAG_CLEAR(pkt);
    }

    /* Decrement the packet counter */
    for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
        atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1,
                   &osh->cmn->pktalloced);
    }
    return (struct sk_buff *)pkt;
}

/* Convert a native(OS) packet to driver packet.
 * In the process, native packet is destroyed, there is no copying
 * Also, a packettag is zeroed out
 */
void *BCMFASTPATH osl_pkt_frmnative(osl_t *osh, void *pkt)
{
    struct sk_buff *cskb;
    struct sk_buff *nskb;
    unsigned long pktalloced = 0;

    if (osh->pub.pkttag) {
        OSL_PKTTAG_CLEAR(pkt);
    }

    /* walk the PKTCLINK() list */
    for (cskb = (struct sk_buff *)pkt; cskb != NULL;
         cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {

        /* walk the pkt buffer list */
        for (nskb = cskb; nskb; nskb = nskb->next) {

            /* Increment the packet counter */
            pktalloced++;

            /* clean the 'prev' pointer
             * Kernel 3.18 is leaving skb->prev pointer set to skb
             * to indicate a non-fragmented skb
             */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
            nskb->prev = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */
        }
    }

    /* Increment the packet counter */
    atomic_add(pktalloced, &osh->cmn->pktalloced);

    return (void *)pkt;
}

/* Return a new packet. zero out pkttag */
void *BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
linux_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
{
    struct sk_buff *skb;
    uchar num = 0;
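    /* Allocation failure injection for testing (assumption: lmtest is the
     * OSL's low-memory-simulation knob): with lmtest enabled, a random byte
     * decides whether this allocation fails, at a rate that scales with
     * lmtest (0x8D0 / 0x64 == 2256 / 100).
     */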
    if (lmtest != FALSE) {
        get_random_bytes(&num, sizeof(uchar));
        if ((num + 1) <= (0x8D0 * lmtest / 0x64)) {
            return NULL;
        }
    }

    if ((skb = osl_alloc_skb(osh, len))) {
        skb->tail += len;
        skb->len += len;
        skb->priority = 0;

        atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
        bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */
    }

    return ((void *)skb);
}
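
/* Typical call path (a sketch, not taken from this file): driver code
 * normally goes through the OSL packet macros rather than calling
 * linux_pktget() directly, roughly:
 *
 *     void *p = PKTGET(osh, buflen, TRUE);        // wraps linux_pktget()
 *     if (p != NULL) {
 *         memcpy(PKTDATA(osh, p), payload, buflen);
 *         ... hand p to the bus layer ...
 *         PKTFREE(osh, p, TRUE);                  // wraps linux_pktfree()
 *     }
 */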

/* Free the driver packet. Free the tag if present */
void BCMFASTPATH
#ifdef BCM_OBJECT_TRACE
linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
linux_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
    struct sk_buff *skb, *nskb;
    if (osh == NULL) {
        return;
    }

    skb = (struct sk_buff *)p;

    if (send) {
        if (osh->pub.tx_fn) {
            osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
        }
    } else {
        if (osh->pub.rx_fn) {
            osh->pub.rx_fn(osh->pub.rx_ctx, p);
        }
    }

    PKTDBG_TRACE(osh, (void *)skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
    if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
        printk("%s: pkt %p is from static pool\n", __FUNCTION__, p);
        dump_stack();
        return;
    }

    if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
        printk("%s: pkt %p is from static pool and not in use\n", __FUNCTION__,
               p);
        dump_stack();
        return;
    }
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

    /* perversion: we use skb->next to chain multi-skb packets */
    while (skb) {
        nskb = skb->next;
        skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
        bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

        {
            if (skb->destructor || irqs_disabled()) {
                /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
                 * destructor exists
                 */
                dev_kfree_skb_any(skb);
            } else {
                /* can free immediately (even in_irq()) if destructor
                 * does not exist
                 */
                dev_kfree_skb(skb);
            }
        }
        atomic_dec(&osh->cmn->pktalloced);
        skb = nskb;
    }
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void *osl_pktget_static(osl_t *osh, uint len)
{
    int i = 0;
    struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
    unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

    if (!bcm_static_skb) {
        return linux_pktget(osh, len);
    }

    if (len > DHD_SKB_MAX_BUFSIZE) {
        printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__,
               len);
        return linux_pktget(osh, len);
    }

#ifdef DHD_USE_STATIC_CTRLBUF
    spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

    if (len <= DHD_SKB_2PAGE_BUFSIZE) {
        uint32 index;
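        /* Round-robin scan of the 2-page pool: start at the slot after the
         * last allocation (last_allocated_index) and take the first
         * preallocated skb that is not currently in use.
         */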
        for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
            index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
            bcm_static_skb->last_allocated_index++;
            if (bcm_static_skb->skb_8k[index] &&
                bcm_static_skb->pkt_use[index] == 0) {
                break;
            }
        }

        if (i < STATIC_PKT_2PAGE_NUM) {
            bcm_static_skb->pkt_use[index] = 1;
            skb = bcm_static_skb->skb_8k[index];
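            /* Reset the recycled skb to a pristine state: rewind data to the
             * buffer head, re-reserve the default headroom, then size it to
             * the requested length and mark it as in use via mac_len.
             */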
            skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
            skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
#else
            skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
            skb->data += PKT_HEADROOM_DEFAULT;
            skb->cloned = 0;
            skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
            skb_set_tail_pointer(skb, len);
#else
            skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
            skb->len = len;
            skb->mac_len = PREALLOC_USED_MAGIC;
            spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
            return skb;
        }
    }

    spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
    printk("%s: all static pkt in use!\n", __FUNCTION__);
    return NULL;
#else
    down(&bcm_static_skb->osl_pkt_sem);

    if (len <= DHD_SKB_1PAGE_BUFSIZE) {
        for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
            if (bcm_static_skb->skb_4k[i] && bcm_static_skb->pkt_use[i] == 0) {
                break;
            }
        }

        if (i != STATIC_PKT_1PAGE_NUM) {
            bcm_static_skb->pkt_use[i] = 1;

            skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
            skb_set_tail_pointer(skb, len);
#else
            skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
            skb->len = len;

            up(&bcm_static_skb->osl_pkt_sem);
            return skb;
        }
    }

    if (len <= DHD_SKB_2PAGE_BUFSIZE) {
        for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
            if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
                bcm_static_skb->pkt_use[i] == 0) {
                break;
            }
        }

        if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
            bcm_static_skb->pkt_use[i] = 1;
            skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
            skb_set_tail_pointer(skb, len);
#else
            skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
            skb->len = len;

            up(&bcm_static_skb->osl_pkt_sem);
            return skb;
        }
    }

#if defined(ENHANCED_STATIC_BUF)
    if (bcm_static_skb->skb_16k &&
        bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
        bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;

        skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
        skb_set_tail_pointer(skb, len);
#else
        skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
        skb->len = len;

        up(&bcm_static_skb->osl_pkt_sem);
        return skb;
    }
#endif /* ENHANCED_STATIC_BUF */

    up(&bcm_static_skb->osl_pkt_sem);
    printk("%s: all static pkt in use!\n", __FUNCTION__);
    return linux_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}

void osl_pktfree_static(osl_t *osh, void *p, bool send)
{
    int i;
#ifdef DHD_USE_STATIC_CTRLBUF
    struct sk_buff *skb = (struct sk_buff *)p;
    unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

    if (!p) {
        return;
    }

    if (!bcm_static_skb) {
        linux_pktfree(osh, p, send);
        return;
    }

#ifdef DHD_USE_STATIC_CTRLBUF
    spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

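    /* For preallocated control buffers, skb->mac_len is repurposed as an
     * in-use marker: PREALLOC_USED_MAGIC while handed out by
     * osl_pktget_static(), PREALLOC_FREE_MAGIC once returned here.
     */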
    for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
        if (p == bcm_static_skb->skb_8k[i]) {
            if (bcm_static_skb->pkt_use[i] == 0) {
                printk("%s: static pkt idx %d(%p) is double freed\n",
                       __FUNCTION__, i, p);
            } else {
                bcm_static_skb->pkt_use[i] = 0;
            }

            if (skb->mac_len != PREALLOC_USED_MAGIC) {
                printk("%s: static pkt idx %d(%p) is not in use\n",
                       __FUNCTION__, i, p);
            }

            skb->mac_len = PREALLOC_FREE_MAGIC;
            spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
            return;
        }
    }

    spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
    printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
    down(&bcm_static_skb->osl_pkt_sem);
    for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
        if (p == bcm_static_skb->skb_4k[i]) {
            bcm_static_skb->pkt_use[i] = 0;
            up(&bcm_static_skb->osl_pkt_sem);
            return;
        }
    }

    for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
        if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
            bcm_static_skb->pkt_use[i] = 0;
            up(&bcm_static_skb->osl_pkt_sem);
            return;
        }
    }
#ifdef ENHANCED_STATIC_BUF
    if (p == bcm_static_skb->skb_16k) {
        bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
        up(&bcm_static_skb->osl_pkt_sem);
        return;
    }
#endif /* ENHANCED_STATIC_BUF */
    up(&bcm_static_skb->osl_pkt_sem);
#endif /* DHD_USE_STATIC_CTRLBUF */
    linux_pktfree(osh, p, send);
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */

/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
void *
#ifdef BCM_OBJECT_TRACE
osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
#else
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCM_OBJECT_TRACE */
{
    void *p;

    ASSERT(!PKTISCHAINED(skb));

    /* clear the CTFBUF flag if set and map the rest of the buffer
     * before cloning.
     */
    PKTCTFMAP(osh, skb);

    if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL) {
        return NULL;
    }

    /* skb_clone copies skb->cb.. we don't want that */
    if (osh->pub.pkttag) {
        OSL_PKTTAG_CLEAR(p);
    }

    /* Increment the packet counter */
    atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
    bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

    return (p);
}

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint osl_pktalloced(osl_t *osh)
{
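    /* osh->cmn (and its pktalloced counter) can be shared between handles;
     * the count is only reported when this is the sole reference
     * (refcount == 1), presumably because a shared total would be
     * misleading for any single caller.
     */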
    if (atomic_read(&osh->cmn->refcount) == 1) {
        return (atomic_read(&osh->cmn->pktalloced));
    } else {
        return 0;
    }
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
void osl_pkt_orphan_partial(struct sk_buff *skb, int tsq)
{
    uint32 fraction;
    static void *p_tcp_wfree = NULL;

    if (tsq <= 0) {
        return;
    }

    if (!skb->destructor || skb->destructor == sock_wfree) {
        return;
    }

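    /* tcp_wfree() is not an exported symbol, so it cannot be compared against
     * directly from a module. Resolve it once by symbolizing the destructor
     * pointer and matching the name (truncated to the 9 characters of
     * "tcp_wfree"), then cache the resolved address.
     */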
    if (unlikely(!p_tcp_wfree)) {
        char sym[KSYM_SYMBOL_LEN];
        sprint_symbol(sym, (unsigned long)skb->destructor);
        sym[0x9] = 0;
        if (!strcmp(sym, "tcp_wfree")) {
            p_tcp_wfree = skb->destructor;
        } else {
            return;
        }
    }

    if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk)) {
        return;
    }

    /* Subtract a portion of the skb truesize from the socket's
     * sk_wmem_alloc so that more skbs can be queued on this socket,
     * giving a better cushion for the WiFi device's requirements.
     * E.g. with tsq == 10, 9/10 of truesize is released from the
     * socket's write-memory accounting.
     */
    fraction = skb->truesize * (tsq - 1) / tsq;
    skb->truesize -= fraction;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
    atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
#else
    atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0) */
    skb_orphan(skb);
}
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */