• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /******************************************************************************
3  *
4  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
5  *
6  ******************************************************************************/
7 #define _RTL8188E_XMIT_C_
8 #include <osdep_service.h>
9 #include <drv_types.h>
10 #include <mon.h>
11 #include <wifi.h>
12 #include <osdep_intf.h>
13 #include <usb_ops_linux.h>
14 #include <rtl8188e_hal.h>
15 
/* Initialize the HAL part of the adapter's xmit private data: bind the
 * USB bulk-out completion tasklet to this adapter.
 *
 * @adapt: owning adapter; its xmitpriv.xmit_tasklet is initialized here.
 *
 * Returns _SUCCESS unconditionally.
 */
s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;

	/* rtl8188eu_xmit_tasklet takes the adapter pointer; cast to the
	 * legacy tasklet prototype, which passes the context as an
	 * unsigned long.
	 */
	tasklet_init(&pxmitpriv->xmit_tasklet,
		     (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
		     (unsigned long)adapt);
	return _SUCCESS;
}
25 
urb_zero_packet_chk(struct adapter * adapt,int sz)26 static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
27 {
28 	return !((sz + TXDESC_SIZE) % adapt->HalData->UsbBulkOutSize);
29 }
30 
rtl8188eu_cal_txdesc_chksum(struct tx_desc * ptxdesc)31 static void rtl8188eu_cal_txdesc_chksum(struct tx_desc	*ptxdesc)
32 {
33 	u16 *usptr = (u16 *)ptxdesc;
34 	u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
35 	u32 index;
36 	u16 checksum = 0;
37 
38 	/* Clear first */
39 	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
40 
41 	for (index = 0; index < count; index++)
42 		checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
43 	ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
44 }
45 
/*
 * In normal chip, we should send some packet to Hw which will be used by Fw
 * in FW LPS mode. The function is to fill the Tx descriptor of these packets,
 * then Fw can tell Hw to send these packets directly.
 *
 * @adapt:        owning adapter (unused here beyond the prototype).
 * @desc:         buffer of at least TXDESC_SIZE bytes to fill in place.
 * @BufferLen:    payload length programmed into the descriptor.
 * @ispspoll:     nonzero for a PS-Poll frame (keep the AID from the header).
 * @is_btqosnull: nonzero for a BT QoS-Null frame.
 */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8  ispspoll, u8  is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/*  Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /*  Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /*  Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed to an
	 * erroneous value by Hw.
	 */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /*  Hw set sequence number */
		/* NOTE(review): (8 << 28) sets bit 31 of txdw3, not "bit3" as the
		 * original comment said — presumably the top bit of the nibble at
		 * bits 28..31; confirm against the 8188E TX descriptor spec.
		 */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /*  BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */

	/*  USB interface drops the packet if the checksum of the descriptor isn't correct. */
	/*  Using this checksum lets hardware recover from packet bulk out errors (e.g. Cancel URC, Bulk out error.). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}
87 
fill_txdesc_sectype(struct pkt_attrib * pattrib,struct tx_desc * ptxdesc)88 static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
89 {
90 	if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
91 		switch (pattrib->encrypt) {
92 		/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
93 		case _WEP40_:
94 		case _WEP104_:
95 			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
96 			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
97 			break;
98 		case _TKIP_:
99 		case _TKIP_WTMIC_:
100 			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
101 			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
102 			break;
103 		case _AES_:
104 			ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
105 			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
106 			break;
107 		case _NO_PRIVACY_:
108 		default:
109 			break;
110 		}
111 	}
112 }
113 
/* Program the virtual-carrier-sense (RTS/CTS) bits of the descriptor
 * word pointed to by @pdw according to the frame's protection mode,
 * and, for HT peers, the RTS bandwidth and subchannel.
 */
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}

	if (!pattrib->vcs_mode)
		return;

	*pdw |= cpu_to_le32(HW_RTS_EN);

	/*  Set RTS BW for HT stations only */
	if (!pattrib->ht_en)
		return;

	if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
		*pdw |= cpu_to_le32(BIT(27));

	/* RTS subchannel selection (2-bit field at bits 28..29) */
	switch (pattrib->ch_offset) {
	case HAL_PRIME_CHNL_OFFSET_LOWER:
		*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
		break;
	case HAL_PRIME_CHNL_OFFSET_UPPER:
		*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
		break;
	case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
		break;
	default:
		*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		break;
	}
}
144 
/* Program the data bandwidth and subchannel bits of the descriptor word
 * pointed to by @pdw for HT peers; non-HT frames are left untouched.
 */
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (!pattrib->ht_en)
		return;

	if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
		*pdw |= cpu_to_le32(BIT(25));

	/* Data subchannel selection within the 40 MHz channel */
	switch (pattrib->ch_offset) {
	case HAL_PRIME_CHNL_OFFSET_LOWER:
		*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		break;
	case HAL_PRIME_CHNL_OFFSET_UPPER:
		*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		break;
	case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
		break;
	default:
		*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
		break;
	}
}
160 
/* Fill the 32-byte TX descriptor placed in front of the frame at @pmem.
 *
 * @pxmitframe: frame being transmitted (attributes, frame tag, pkt_offset).
 * @pmem:       start of the xmit buffer (descriptor followed by payload).
 * @sz:         payload size in bytes, excluding TXDESC_SIZE.
 * @bagg_pkt:   true when this frame is part of a USB TX aggregate.
 *
 * Returns 1 ("pull") when the descriptor was shifted forward by
 * PACKET_OFFSET_SZ to avoid an exact-multiple bulk-out transfer, else 0;
 * the caller must then adjust buf_addr and the write size accordingly.
 */
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int pull = 0;
	uint qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter *adapt = pxmitframe->padapter;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
	struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;

	/* For non-aggregated frames, if descriptor + payload would be an
	 * exact multiple of the USB bulk size, shift ("pull") the descriptor
	 * forward by PACKET_OFFSET_SZ so the transfer never ends on a
	 * zero-length packet boundary.
	 */
	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */

	/* Broadcast/multicast receiver address */
	if (is_multicast_ether_addr(pattrib->ra))
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	/* When pulled, one reserved PACKET_OFFSET_SZ unit has been consumed,
	 * so the advertised pkt_offset shrinks by one.
	 */
	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/*  pkt_offset, unit:8 bytes padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16 , offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Non EAP & ARP & DHCP type data packet */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS  Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
			}
			/* Let the rate-adaptation engine pick rate and power */
			data_rate = ODM_RA_GetDecisionRate_8188E(odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/*  EAP data packet and ARP packet and DHCP. */
			/*  Use the 1M data rate to send the EAP/ARP packet. */
			/*  This will maybe make the handshake smooth. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/*  DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel&0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		/* Unknown frame tag: fall back to fixed MAC ID 4 / raid 6 */
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/*  2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/*  (1) The sequence number of each non-Qos frame / broadcast / multicast / */
	/*  mgnt frame should be controlled by Hw because Fw will also send null data */
	/*  which we cannot control when Fw LPS enable. */
	/*  --> default enable non-Qos data sequense number. 2010.06.23. by tynli. */
	/*  (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
	/*  (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
	/*  2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /*  Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);	/*  Hw set sequence number */
	}

	/* Antenna-diversity hook: may patch the descriptor in place */
	rtl88eu_dm_set_tx_ant_by_tx_info(odmpriv, pmem, pattrib->mac_id);

	/* Checksum must be computed last — it covers all fields above */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}
330 
/* for non-agg data frame or management frame */
/* Write a (possibly fragmented) frame to the USB TX FIFO: build a TX
 * descriptor per fragment with update_txdesc(), bulk-out each fragment,
 * then free the xmit frame.
 *
 * Returns _SUCCESS, or _FAIL if any fragment's usb_write_port() failed.
 */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;

	/* Kick off BA (ADDBA) negotiation for ordinary data frames only —
	 * not for EAPOL/ARP/TDLS/DHCP, which must not be delayed.
	 */
	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);
	mem_addr = pxmitframe->buf_addr;

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));

	for (t = 0; t < pattrib->nr_frags; t++) {
		/* Latch a failure from the previous iteration's write */
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));

			/* Intermediate fragment: full fragment length minus
			 * FCS (4) and ICV.
			 */
			sz = pxmitpriv->frag_len;
			sz = sz - 4 - pattrib->icv_len;
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			/* Descriptor was shifted to dodge a zero-length USB
			 * packet; the reserved offset is no longer sent.
			 */
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}
		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));

		mem_addr += w_sz;

		/* Next fragment starts on a 4-byte boundary */
		mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if  (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}
396 
xmitframe_need_length(struct xmit_frame * pxmitframe)397 static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
398 {
399 	struct pkt_attrib *pattrib = &pxmitframe->attrib;
400 
401 	u32 len;
402 
403 	/*  no consider fragement */
404 	len = pattrib->hdrlen + pattrib->iv_len +
405 		SNAP_SIZE + sizeof(u16) +
406 		pattrib->pktlen +
407 		((pattrib->bswenc) ? pattrib->icv_len : 0);
408 
409 	if (pattrib->encrypt == _TKIP_)
410 		len += 8;
411 
412 	return len;
413 }
414 
/* Dequeue pending xmit frames, aggregate as many same-priority/same-DA
 * frames as fit into one xmit buffer (bounded by MAX_XMITBUF_SZ, the USB
 * aggregation descriptor count and MAX_TX_AGG_PACKET_NUMBER), then bulk
 * the whole buffer out over USB.
 *
 * Returns true when a buffer was transmitted, false when there was no
 * buffer or no frame to send.
 */
bool rtl8188eu_xmitframe_complete(struct adapter *adapt,
				  struct xmit_priv *pxmitpriv)
{
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;
	struct xmit_buf *pxmitbuf;

	/*  aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/*  next pkt address */
	u32 pbuf_tail;	/*  last pkt tail */
	u32 len;	/*  packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = adapt->HalData->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/*  dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		return false;

	/* 3 1. pick up first frame */
	/* pxmitframe is NULL here; rtw_free_xmitframe() tolerates NULL */
	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	if (!pxmitframe) {
		/*  no more xmit frame, release xmit buffer */
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		return false;
	}

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	pxmitframe->agg_num = 1; /*  alloc xmitframe should assign to 1. */
	pxmitframe->pkt_offset = 1; /*  first frame of aggregation, reserve offset */

	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	/*  always return ndis_packet after rtw_xmitframe_coalesce */
	rtw_os_xmit_complete(adapt, pxmitframe);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8); /* next frame starts 8-byte aligned */

	/*  check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /*  round to next bulksize */
	}

	/*  dequeue same priority packet from station tx queue */
	/* Map 802.1d user priority to the station's per-AC service queue
	 * and the matching hw_xmit accounting entry (VO/VI/BE/BK).
	 */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &psta->sta_xmitpriv.bk_q;
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &psta->sta_xmitpriv.vi_q;
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &psta->sta_xmitpriv.vo_q;
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &psta->sta_xmitpriv.be_q;
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /*  not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /*  not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		/* Frame would overflow the xmit buffer: restore its
		 * first-frame markers and leave it queued.
		 */
		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/*  (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/*  don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (pfirstframe->agg_num ==  MAX_TX_AGG_PACKET_NUMBER)
			break;

		/* Limit descriptors per USB bulk transfer */
		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == adapt->HalData->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	/* Same ADDBA trigger policy as rtw_dump_xframe(): skip
	 * EAPOL/ARP/TDLS/DHCP frames.
	 */
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);
	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/*  remove pkt_offset to avoid a zero-length USB packet */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, pxmitbuf);

	/* 3 5. update statistic — count payload only, without descriptors
	 * and padding
	 */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}
595 
/*
 * Return
 *	true	dump packet directly
 *	false	enqueue packet
 */
/* Transmit entry point for data frames: send immediately when the TX
 * path is idle, otherwise queue the frame for later aggregation.
 * Returns true when the caller is done with the frame (sent or dropped),
 * false when it was enqueued.
 */
bool rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	/* Other frames for this station/AC already queued: preserve order */
	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	/* Don't transmit directly while scanning or associating */
	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING))
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		goto enqueue;

	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	if (res == _SUCCESS) {
		rtw_dump_xframe(adapt, pxmitframe);
	} else {
		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	/* Still holding pxmitpriv->lock on this path */
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}
655 
rtw_hal_mgnt_xmit(struct adapter * adapt,struct xmit_frame * pmgntframe)656 s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
657 {
658 	struct xmit_priv *xmitpriv = &adapt->xmitpriv;
659 
660 	rtl88eu_mon_xmit_hook(adapt->pmondev, pmgntframe, xmitpriv->frag_len);
661 	return rtw_dump_xframe(adapt, pmgntframe);
662 }
663