/******************************************************************************
 *
 * Copyright(c) 2007 - 2017 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _XMIT_OSDEP_C_

#include <drv_types.h>

#define DBG_DUMP_OS_QUEUE_CTL 0

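/* Return the number of bytes not yet consumed from the packet file. */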
uint rtw_remainder_len(struct pkt_file *pfile)
{
	return pfile->buf_len - ((SIZE_PTR)(pfile->cur_addr) - (SIZE_PTR)(pfile->buf_start));
}

void _rtw_open_pktfile(_pkt *pktptr, struct pkt_file *pfile)
{
	pfile->pkt = pktptr;
	pfile->cur_addr = pfile->buf_start = pktptr->data;
	pfile->pkt_len = pfile->buf_len = pktptr->len;

	pfile->cur_buffer = pfile->buf_start;
}

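/*
 * Copy up to rlen bytes from the packet file into rmem (the copy is skipped
 * when rmem is NULL) and advance the read position; returns the number of
 * bytes consumed.
 */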
uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
{
	uint len = 0;

	len = rtw_remainder_len(pfile);
	len = (rlen > len) ? len : rlen;

	if (rmem)
		skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, len);

	pfile->cur_addr += len;
	pfile->pkt_len -= len;

	return len;
}

sint rtw_endofpktfile(struct pkt_file *pfile)
{
	if (pfile->pkt_len == 0)
		return _TRUE;

	return _FALSE;
}

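/*
 * Prepare a TX skb for checksum handling (only when CONFIG_TCP_CSUM_OFFLOAD_TX
 * is enabled): fall back to a software checksum when the frame will be
 * encrypted by SW, otherwise clear the IP/TCP/UDP checksum fields so the HW
 * checksum offload engine can fill them in.
 */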
void rtw_set_tx_chksum_offload(_pkt *pkt, struct pkt_attrib *pattrib)
{
#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	struct sk_buff *skb = (struct sk_buff *)pkt;
	struct iphdr *iph = NULL;
	struct ipv6hdr *i6ph = NULL;
	struct udphdr *uh = NULL;
	struct tcphdr *th = NULL;
	u8 protocol = 0xFF;

	if (skb->protocol == htons(ETH_P_IP)) {
		iph = (struct iphdr *)skb_network_header(skb);
		protocol = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		i6ph = (struct ipv6hdr *)skb_network_header(skb);
		protocol = i6ph->nexthdr;
	}

	/* HW cannot compute the checksum if the header & payload are encrypted by SW (causes TXDMA error) */
	if (pattrib->bswenc == _TRUE) {
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			skb_checksum_help(skb);
		return;
	}

	/* Per the HW rule, clear the IPv4 checksum and the UDP/TCP checksum for UDP/TCP packets */
	switch (protocol) {
	case IPPROTO_UDP:
		uh = (struct udphdr *)skb_transport_header(skb);
		uh->check = 0;
		if (iph)
			iph->check = 0;
		pattrib->hw_csum = _TRUE;
		break;
	case IPPROTO_TCP:
		th = (struct tcphdr *)skb_transport_header(skb);
		th->check = 0;
		if (iph)
			iph->check = 0;
		pattrib->hw_csum = _TRUE;
		break;
	default:
		break;
	}
#endif
}

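/*
 * Allocate the TX buffer for this xmit_buf (USB coherent buffer, PCIe
 * coherent DMA memory or plain heap memory, depending on the interface)
 * and, for USB, the URBs used to submit it.
 */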
int rtw_os_xmit_resource_alloc(_adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag)
{
#ifdef CONFIG_PCIE_DMA_COHERENT
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	struct pci_dev *pdev = pdvobjpriv->ppcidev;
#endif

	if (alloc_sz > 0) {
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX
		struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
		struct usb_device *pusbd = pdvobjpriv->pusbdev;

		pxmitbuf->pallocated_buf = rtw_usb_buffer_alloc(pusbd, (size_t)alloc_sz, &pxmitbuf->dma_transfer_addr);
		pxmitbuf->pbuf = pxmitbuf->pallocated_buf;
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;
#else /* CONFIG_USE_USB_BUFFER_ALLOC_TX */

#ifdef CONFIG_PCIE_DMA_COHERENT
		pxmitbuf->pallocated_buf = dma_alloc_coherent(&pdev->dev, alloc_sz, &pxmitbuf->dma_bpa, GFP_KERNEL);
#else
		pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
#endif
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;

		pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);

#endif /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
	}

	if (flag) {
#ifdef CONFIG_USB_HCI
		int i;

		for (i = 0; i < 8; i++) {
			pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
			if (pxmitbuf->pxmit_urb[i] == NULL) {
				RTW_INFO("pxmitbuf->pxmit_urb[i]==NULL");
				return _FAIL;
			}
		}
#endif
	}

	return _SUCCESS;
}

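/* Release the URBs and the TX buffer allocated by rtw_os_xmit_resource_alloc(). */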
void rtw_os_xmit_resource_free(_adapter *padapter, struct xmit_buf *pxmitbuf, u32 free_sz, u8 flag)
{
#ifdef CONFIG_PCIE_DMA_COHERENT
	struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
	struct pci_dev *pdev = pdvobjpriv->ppcidev;
#endif

	if (flag) {
#ifdef CONFIG_USB_HCI
		int i;

		for (i = 0; i < 8; i++) {
			if (pxmitbuf->pxmit_urb[i]) {
				/* usb_kill_urb(pxmitbuf->pxmit_urb[i]); */
				usb_free_urb(pxmitbuf->pxmit_urb[i]);
			}
		}
#endif
	}

	if (free_sz > 0) {
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX
		struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
		struct usb_device *pusbd = pdvobjpriv->pusbdev;

		rtw_usb_buffer_free(pusbd, (size_t)free_sz, pxmitbuf->pallocated_buf, pxmitbuf->dma_transfer_addr);
		pxmitbuf->pallocated_buf = NULL;
		pxmitbuf->dma_transfer_addr = 0;
#else /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
		if (pxmitbuf->pallocated_buf)
#ifdef CONFIG_PCIE_DMA_COHERENT
			dma_free_coherent(&pdev->dev, free_sz, pxmitbuf->pallocated_buf, pxmitbuf->dma_bpa);
#else
			rtw_mfree(pxmitbuf->pallocated_buf, free_sz);
#endif
#endif /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
	}
}

void dump_os_queue(void *sel, _adapter *padapter)
{
	struct net_device *ndev = padapter->pnetdev;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	int i;

	for (i = 0; i < 4; i++) {
		RTW_PRINT_SEL(sel, "os_queue[%d]:%s\n"
			, i, __netif_subqueue_stopped(ndev, i) ? "stopped" : "waked");
	}
#else
	RTW_PRINT_SEL(sel, "os_queue:%s\n"
		, netif_queue_stopped(ndev) ? "stopped" : "waked");
#endif
}

#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)

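/*
 * Decide whether the OS TX queue mapped to os_qid should be woken, taking the
 * WMM threshold (wifi_spec mode), a reset in progress and the MCC state into
 * account.
 */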
static inline bool rtw_os_need_wake_queue(_adapter *padapter, u16 os_qid)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	if (padapter->registrypriv.wifi_spec) {
		if (pxmitpriv->hwxmits[os_qid].accnt < WMM_XMIT_THRESHOLD)
			return _TRUE;
#ifdef DBG_CONFIG_ERROR_DETECT
#ifdef DBG_CONFIG_ERROR_RESET
	} else if (rtw_hal_sreset_inprogress(padapter) == _TRUE) {
		return _FALSE;
#endif /* DBG_CONFIG_ERROR_RESET */
#endif /* DBG_CONFIG_ERROR_DETECT */
	} else {
#ifdef CONFIG_MCC_MODE
		if (MCC_EN(padapter)) {
			if (rtw_hal_check_mcc_status(padapter, MCC_STATUS_DOING_MCC)
			    && MCC_STOP(padapter))
				return _FALSE;
		}
#endif /* CONFIG_MCC_MODE */
		return _TRUE;
	}
	return _FALSE;
#else
#ifdef CONFIG_MCC_MODE
	if (MCC_EN(padapter)) {
		if (rtw_hal_check_mcc_status(padapter, MCC_STATUS_DOING_MCC)
		    && MCC_STOP(padapter))
			return _FALSE;
	}
#endif /* CONFIG_MCC_MODE */
	return _TRUE;
#endif
}

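/*
 * Decide whether the OS TX queue mapped to os_qid should be stopped because
 * the driver is running low on xmit frames (or has crossed the WMM threshold
 * in wifi_spec mode).
 */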
static inline bool rtw_os_need_stop_queue(_adapter *padapter, u16 os_qid)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	if (padapter->registrypriv.wifi_spec) {
		/* No free space for TX, tx_worker is too slow */
		if (pxmitpriv->hwxmits[os_qid].accnt > WMM_XMIT_THRESHOLD)
			return _TRUE;
	} else {
		if (pxmitpriv->free_xmitframe_cnt <= 4)
			return _TRUE;
	}
#else
	if (pxmitpriv->free_xmitframe_cnt <= 4)
		return _TRUE;
#endif
	return _FALSE;
}

void rtw_os_pkt_complete(_adapter *padapter, _pkt *pkt)
{
	rtw_skb_free(pkt);
}

void rtw_os_xmit_complete(_adapter *padapter, struct xmit_frame *pxframe)
{
	if (pxframe->pkt)
		rtw_os_pkt_complete(padapter, pxframe->pkt);

	pxframe->pkt = NULL;
}

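/*
 * Kick the TX path when frames are pending: signal the xmit thread via its
 * semaphore (SDIO/GSPI, and PCI thread mode) or schedule the xmit tasklet
 * for the other interfaces.
 */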
void rtw_os_xmit_schedule(_adapter *padapter)
{
#if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
	_adapter *pri_adapter;

	if (!padapter)
		return;

	pri_adapter = GET_PRIMARY_ADAPTER(padapter);

	if (_rtw_queue_empty(&padapter->xmitpriv.pending_xmitbuf_queue) == _FALSE)
		_rtw_up_sema(&pri_adapter->xmitpriv.xmit_sema);
#else
	_irqL irqL;
	struct xmit_priv *pxmitpriv;

	if (!padapter)
		return;

	pxmitpriv = &padapter->xmitpriv;

	_enter_critical_bh(&pxmitpriv->lock, &irqL);

	if (rtw_txframes_pending(padapter))
		tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);

	_exit_critical_bh(&pxmitpriv->lock, &irqL);

#if defined(CONFIG_PCI_HCI) && defined(CONFIG_XMIT_THREAD_MODE)
	if (_rtw_queue_empty(&padapter->xmitpriv.pending_xmitbuf_queue) == _FALSE)
		_rtw_up_sema(&padapter->xmitpriv.xmit_sema);
#endif
#endif
}

void rtw_os_check_wakup_queue(_adapter *adapter, u16 os_qid)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	if (rtw_os_need_wake_queue(adapter, os_qid)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n", FUNC_ADPT_ARG(adapter), os_qid);
		netif_wake_subqueue(adapter->pnetdev, os_qid);
	}
#else
	if (rtw_os_need_wake_queue(adapter, 0)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_wake_queue\n", FUNC_ADPT_ARG(adapter));
		netif_wake_queue(adapter->pnetdev);
	}
#endif
}

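/*
 * Stop the OS TX (sub)queue mapped to os_qid when the driver is becoming
 * congested; returns _TRUE if the queue was stopped.
 */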
bool rtw_os_check_stop_queue(_adapter *adapter, u16 os_qid)
{
	bool busy = _FALSE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	if (rtw_os_need_stop_queue(adapter, os_qid)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_stop_subqueue[%d]\n", FUNC_ADPT_ARG(adapter), os_qid);
		netif_stop_subqueue(adapter->pnetdev, os_qid);
		busy = _TRUE;
	}
#else
	if (rtw_os_need_stop_queue(adapter, 0)) {
		if (DBG_DUMP_OS_QUEUE_CTL)
			RTW_INFO(FUNC_ADPT_FMT": netif_stop_queue\n", FUNC_ADPT_ARG(adapter));
		rtw_netif_stop_queue(adapter->pnetdev);
		busy = _TRUE;
	}
#endif
	return busy;
}

void rtw_os_wake_queue_at_free_stainfo(_adapter *padapter, int *qcnt_freed)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	int i;

	for (i = 0; i < 4; i++) {
		if (qcnt_freed[i] == 0)
			continue;

		if (rtw_os_need_wake_queue(padapter, i)) {
			if (DBG_DUMP_OS_QUEUE_CTL)
				RTW_INFO(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), i);
			netif_wake_subqueue(padapter->pnetdev, i);
		}
	}
#else
	if (qcnt_freed[0] || qcnt_freed[1] || qcnt_freed[2] || qcnt_freed[3]) {
		if (rtw_os_need_wake_queue(padapter, 0)) {
			if (DBG_DUMP_OS_QUEUE_CTL)
				RTW_INFO(FUNC_ADPT_FMT": netif_wake_queue\n", FUNC_ADPT_ARG(padapter));
			netif_wake_queue(padapter->pnetdev);
		}
	}
#endif
}

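/*
 * Core TX entry point: performs GSO segmentation when TCP checksum offload
 * is enabled, then hands the skb(s) to rtw_xmit(); dropped packets are
 * counted in tx_drop and freed here.
 */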
int _rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	struct sk_buff *skb = pkt;
	struct sk_buff *segs, *nskb;
	netdev_features_t features = padapter->pnetdev->features;
#endif
	u16 os_qid = 0;
	s32 res = 0;

	if (padapter->registrypriv.mp_mode) {
		RTW_INFO("MP_TX_DROP_OS_FRAME\n");
		goto drop_packet;
	}
	DBG_COUNTER(padapter->tx_logs.os_tx);

	if ((rtw_if_up(padapter) == _FALSE)
#ifdef CONFIG_LAYER2_ROAMING
	    && (!padapter->mlmepriv.roam_network)
#endif
	   ) {
		DBG_COUNTER(padapter->tx_logs.os_tx_err_up);
#ifdef DBG_TX_DROP_FRAME
		RTW_INFO("DBG_TX_DROP_FRAME %s if_up fail\n", __FUNCTION__);
#endif
		goto drop_packet;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	os_qid = skb_get_queue_mapping(pkt);
#endif

#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	if (skb_shinfo(skb)->gso_size) {
		/* split a big (65k) skb into several small (1.5k) skbs */
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs) || !segs)
			goto drop_packet;

		do {
			nskb = segs;
			segs = segs->next;
			nskb->next = NULL;
			rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, nskb->truesize);
			res = rtw_xmit(padapter, &nskb, os_qid);
			if (res < 0) {
#ifdef DBG_TX_DROP_FRAME
				RTW_INFO("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __FUNCTION__);
#endif
				pxmitpriv->tx_drop++;
				rtw_os_pkt_complete(padapter, nskb);
			}
		} while (segs);
		rtw_os_pkt_complete(padapter, skb);
		goto exit;
	}
#endif

	res = rtw_xmit(padapter, &pkt, os_qid);
	if (res < 0) {
#ifdef DBG_TX_DROP_FRAME
		RTW_INFO("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __FUNCTION__);
#endif
		goto drop_packet;
	}

	goto exit;

drop_packet:
	pxmitpriv->tx_drop++;
	rtw_os_pkt_complete(padapter, pkt);

exit:
	return 0;
}

#ifdef CONFIG_CUSTOMER_ALIBABA_GENERAL
int check_alibaba_meshpkt(struct sk_buff *skb)
{
	u16 protocol;

	if (skb)
		return (htons(skb->protocol) == ETH_P_ALL);

	return _FALSE;
}

s32 rtw_alibaba_mesh_xmit_entry(_pkt *pkt, struct net_device *ndev)
{
	u16 frame_ctl;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
	struct pkt_file pktfile;
	struct rtw_ieee80211_hdr *pwlanhdr;
	struct pkt_attrib *pattrib;
	struct xmit_frame *pmgntframe;
	struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
	struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
	unsigned char *pframe;
	struct sk_buff *skb = (struct sk_buff *)pkt;
	int len = skb->len;

	rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, skb->truesize);

	pmgntframe = alloc_mgtxmitframe(pxmitpriv);
	if (pmgntframe == NULL)
		goto fail;

	pattrib = &pmgntframe->attrib;
	update_mgntframe_attrib(padapter, pattrib);
	_rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
	pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
	pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;

	_rtw_open_pktfile(pkt, &pktfile);
	_rtw_pktfile_read(&pktfile, pframe, len);

	pattrib->type = pframe[0] & 0x0C;
	pattrib->subtype = pframe[0] & 0xF0;
	pattrib->raid = rtw_get_mgntframe_raid(padapter, WIRELESS_11G);
	pattrib->rate = MGN_24M;
	pattrib->pktlen = len;
	SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
	pmlmeext->mgnt_seq++;

	RTW_DBG_DUMP("rtw_alibaba_mesh_xmit_entry payload:", skb->data, len);

	pattrib->last_txcmdsz = pattrib->pktlen;
	dump_mgntframe(padapter, pmgntframe);

fail:
	rtw_skb_free(skb);
	return 0;
}
#endif

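/*
 * ndo_start_xmit handler: dispatches the skb to the mesh, monitor or normal
 * TX path and translates the result into NETDEV_TX_OK/NETDEV_TX_BUSY on
 * kernels >= 2.6.32.
 */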
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
netdev_tx_t rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
#else
int rtw_xmit_entry(_pkt *pkt, _nic_hdl pnetdev)
#endif
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	int ret = 0;

	if (pkt) {
#ifdef CONFIG_CUSTOMER_ALIBABA_GENERAL
		if (check_alibaba_meshpkt(pkt)) {
			ret = rtw_alibaba_mesh_xmit_entry(pkt, pnetdev);
			goto out;
		}
#endif
		if (check_fwstate(pmlmepriv, WIFI_MONITOR_STATE) == _TRUE) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
			rtw_monitor_xmit_entry((struct sk_buff *)pkt, pnetdev);
#endif
		} else {
			rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, pkt->truesize);
			ret = _rtw_xmit_entry(pkt, pnetdev);
		}
	}

#ifdef CONFIG_CUSTOMER_ALIBABA_GENERAL
out:
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
	return (ret == 0) ? NETDEV_TX_OK : NETDEV_TX_BUSY;
#else
	return ret;
#endif
}