• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  ******************************************************************************/
15 #define _RTL8188E_XMIT_C_
16 #include <osdep_service.h>
17 #include <drv_types.h>
18 #include <mon.h>
19 #include <wifi.h>
20 #include <osdep_intf.h>
21 #include <usb_ops_linux.h>
22 #include <rtl8188e_hal.h>
23 
rtw_hal_init_xmit_priv(struct adapter * adapt)24 s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
25 {
26 	struct xmit_priv	*pxmitpriv = &adapt->xmitpriv;
27 
28 	tasklet_init(&pxmitpriv->xmit_tasklet,
29 		     (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
30 		     (unsigned long)adapt);
31 	return _SUCCESS;
32 }
33 
urb_zero_packet_chk(struct adapter * adapt,int sz)34 static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
35 {
36 	return !((sz + TXDESC_SIZE) % adapt->HalData->UsbBulkOutSize);
37 }
38 
rtl8188eu_cal_txdesc_chksum(struct tx_desc * ptxdesc)39 static void rtl8188eu_cal_txdesc_chksum(struct tx_desc	*ptxdesc)
40 {
41 	u16	*usptr = (u16 *)ptxdesc;
42 	u32 count = 16;		/*  (32 bytes / 2 bytes per XOR) => 16 times */
43 	u32 index;
44 	u16 checksum = 0;
45 
46 	/* Clear first */
47 	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
48 
49 	for (index = 0; index < count; index++)
50 		checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
51 	ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
52 }
53 
/*  Description: In normal chip, we should send some packet to Hw which will be used by Fw */
/*			in FW LPS mode. The function is to fill the Tx descriptor of these packets, then */
/*			Fw can tell Hw to send these packets directly. */
/*  @desc points to a TXDESC_SIZE-byte descriptor buffer; @BufferLen is the */
/*  payload length programmed into the descriptor.  @ispspoll selects the */
/*  PS-Poll variant; @is_btqosnull marks a BT QoS-Null frame. */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8  ispspoll, u8  is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/*  Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /*  Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /*  Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent Ps-poll AId field to be changed to error value by Hw. */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /*  Hw set sequence number */
		/* NOTE(review): (8 << 28) sets bit 31, not "bit3" as the
		 * original comment claimed — presumably the HW-seq enable
		 * field; confirm against the 8188E datasheet.
		 * Suggested by TimChen. 2009.12.29. */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28));
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /*  BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */

	/*  USB interface drop packet if the checksum of descriptor isn't correct. */
	/*  Using this checksum can let hardware recovery from packet bulk out error (e.g. Cancel URC, Bulk out error.). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}
93 
fill_txdesc_sectype(struct pkt_attrib * pattrib,struct tx_desc * ptxdesc)94 static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
95 {
96 	if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
97 		switch (pattrib->encrypt) {
98 		/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
99 		case _WEP40_:
100 		case _WEP104_:
101 			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
102 			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
103 			break;
104 		case _TKIP_:
105 		case _TKIP_WTMIC_:
106 			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
107 			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
108 			break;
109 		case _AES_:
110 			ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
111 			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
112 			break;
113 		case _NO_PRIVACY_:
114 		default:
115 			break;
116 		}
117 	}
118 }
119 
/* Program the virtual-carrier-sense bits of txdw4: RTS/CTS or CTS-to-self
 * enable, hardware RTS, and (for HT frames) the RTS bandwidth and
 * subcarrier selection derived from the channel offset.
 */
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}

	if (!pattrib->vcs_mode)
		return;

	*pdw |= cpu_to_le32(HW_RTS_EN);

	/* RTS bandwidth/subcarrier bits only apply to HT frames */
	if (!pattrib->ht_en)
		return;

	if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
		*pdw |= cpu_to_le32(BIT(27));

	switch (pattrib->ch_offset) {
	case HAL_PRIME_CHNL_OFFSET_LOWER:
		*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
		break;
	case HAL_PRIME_CHNL_OFFSET_UPPER:
		*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
		break;
	case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
		/* leave subcarrier field at 0 */
		break;
	default:
		*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		break;
	}
}
150 
/* Program the data bandwidth bit and DATA_SC subcarrier field of txdw4
 * for HT frames, based on 20/40 MHz mode and the prime channel offset.
 * Non-HT frames are left untouched.
 */
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (!pattrib->ht_en)
		return;

	if (pattrib->bwmode & HT_CHANNEL_WIDTH_40)
		*pdw |= cpu_to_le32(BIT(25));

	switch (pattrib->ch_offset) {
	case HAL_PRIME_CHNL_OFFSET_LOWER:
		*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		break;
	case HAL_PRIME_CHNL_OFFSET_UPPER:
		*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		break;
	case HAL_PRIME_CHNL_OFFSET_DONT_CARE:
		/* leave DATA_SC field at 0 */
		break;
	default:
		*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
		break;
	}
}
166 
/* Build the 32-byte TX descriptor immediately in front of the frame data
 * at @pmem.  @sz is the frame length excluding the descriptor; @bagg_pkt
 * is nonzero when the frame is part of a USB aggregate.
 *
 * Returns 1 ("pull") when the descriptor was written PACKET_OFFSET_SZ
 * bytes into the buffer — this drops the reserved packet offset whenever
 * sz + TXDESC_SIZE is NOT an exact multiple of the bulk-out size, so the
 * padding is only kept when it is needed to avoid a bulk-size-multiple
 * transfer (zero-length-packet case).  Returns 0 otherwise.
 */
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int	pull = 0;
	uint	qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter		*adapt = pxmitframe->padapter;
	struct pkt_attrib	*pattrib = &pxmitframe->attrib;
	struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
	struct tx_desc	*ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv	*pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
	int	bmcst = IS_MCAST(pattrib->ra);

	/* Non-aggregated, non-MP frame that would not land exactly on a
	 * bulk boundary: shift ("pull") the descriptor forward to reclaim
	 * the reserved PACKET_OFFSET_SZ bytes.
	 */
	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	/* Keep the frame's pkt_offset bookkeeping in sync with the pull
	 * performed above, so rtw_get_ff_hwaddr()/write size stay correct.
	 */
	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/*  pkt_offset, unit:8 bytes padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
			/* per-MCS max-AMPDU-length table; magic value from
			 * vendor driver — meaning not documented here
			 */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16 , offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Non EAP & ARP & DHCP type data packet:
			 * use rate-adaptation output from the ODM module.
			 */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS  Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
			}
			data_rate = ODM_RA_GetDecisionRate_8188E(odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/*  EAP data packet and ARP packet and DHCP. */
			/*  Use the 1M data rate to send the EAP/ARP packet. */
			/*  This will maybe make the handshake smooth. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/*  DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel&0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		/* Unknown frame tag: fall back to fixed MAC_ID 4 / raid 6 */
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/*  2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/*  (1) The sequence number of each non-Qos frame / broadcast / multicast / */
	/*  mgnt frame should be controlled by Hw because Fw will also send null data */
	/*  which we cannot control when Fw LPS enable. */
	/*  --> default enable non-Qos data sequense number. 2010.06.23. by tynli. */
	/*  (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
	/*  (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
	/*  2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /*  Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);	/*  Hw set sequence number */
	}

	/* Antenna-diversity: patch TX info in the descriptor for this mac_id */
	rtl88eu_dm_set_tx_ant_by_tx_info(odmpriv, pmem, pattrib->mac_id);

	/* Checksum must be the last field written, over the final contents */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}
337 
338 /* for non-agg data frame or  management frame */
rtw_dump_xframe(struct adapter * adapt,struct xmit_frame * pxmitframe)339 static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
340 {
341 	s32 ret = _SUCCESS;
342 	s32 inner_ret = _SUCCESS;
343 	int t, sz, w_sz, pull = 0;
344 	u8 *mem_addr;
345 	u32 ff_hwaddr;
346 	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
347 	struct pkt_attrib *pattrib = &pxmitframe->attrib;
348 	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
349 
350 	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
351 	    (pxmitframe->attrib.ether_type != 0x0806) &&
352 	    (pxmitframe->attrib.ether_type != 0x888e) &&
353 	    (pxmitframe->attrib.ether_type != 0x88b4) &&
354 	    (pxmitframe->attrib.dhcp_pkt != 1))
355 		rtw_issue_addbareq_cmd(adapt, pxmitframe);
356 	mem_addr = pxmitframe->buf_addr;
357 
358 	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));
359 
360 	for (t = 0; t < pattrib->nr_frags; t++) {
361 		if (inner_ret != _SUCCESS && ret == _SUCCESS)
362 			ret = _FAIL;
363 
364 		if (t != (pattrib->nr_frags - 1)) {
365 			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));
366 
367 			sz = pxmitpriv->frag_len;
368 			sz = sz - 4 - pattrib->icv_len;
369 		} else {
370 			/* no frag */
371 			sz = pattrib->last_txcmdsz;
372 		}
373 
374 		pull = update_txdesc(pxmitframe, mem_addr, sz, false);
375 
376 		if (pull) {
377 			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
378 			pxmitframe->buf_addr = mem_addr;
379 			w_sz = sz + TXDESC_SIZE;
380 		} else {
381 			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
382 		}
383 		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
384 
385 		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, pxmitbuf);
386 
387 		rtw_count_tx_stats(adapt, pxmitframe, sz);
388 
389 		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));
390 
391 		mem_addr += w_sz;
392 
393 		mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
394 	}
395 
396 	rtw_free_xmitframe(pxmitpriv, pxmitframe);
397 
398 	if  (ret != _SUCCESS)
399 		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
400 
401 	return ret;
402 }
403 
xmitframe_need_length(struct xmit_frame * pxmitframe)404 static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
405 {
406 	struct pkt_attrib *pattrib = &pxmitframe->attrib;
407 
408 	u32 len;
409 
410 	/*  no consider fragement */
411 	len = pattrib->hdrlen + pattrib->iv_len +
412 		SNAP_SIZE + sizeof(u16) +
413 		pattrib->pktlen +
414 		((pattrib->bswenc) ? pattrib->icv_len : 0);
415 
416 	if (pattrib->encrypt == _TKIP_)
417 		len += 8;
418 
419 	return len;
420 }
421 
/* Dequeue one frame, then aggregate as many further frames of the same
 * priority and destination as fit into a single xmit buffer, writing one
 * TX descriptor per frame, and push the whole buffer out in one USB bulk
 * transfer.  Returns true when a buffer was dumped, false when no buffer
 * or no frame was available.
 */
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv)
{
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;
	struct xmit_buf *pxmitbuf;

	/*  aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/*  next pkt address */
	u32 pbuf_tail;	/*  last pkt tail */
	u32 len;	/*  packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = adapt->HalData->UsbBulkOutSize;
	u8 desc_cnt;	/*  descriptors accumulated in the current bulk chunk */
	u32 bulkptr;	/*  offset of the next bulk-size boundary */

	/*  dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL)
		return false;

	/* 3 1. pick up first frame */
	/* NOTE(review): pxmitframe is still NULL here, so this call looks
	 * like a historical no-op — confirm against rtw_free_xmitframe().
	 */
	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	if (pxmitframe == NULL) {
		/*  no more xmit frame, release xmit buffer */
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		return false;
	}

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	pxmitframe->agg_num = 1; /*  alloc xmitframe should assign to 1. */
	pxmitframe->pkt_offset = 1; /*  first frame of aggregation, reserve offset */

	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	/*  always return ndis_packet after rtw_xmitframe_coalesce */
	rtw_os_xmit_complete(adapt, pxmitframe);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);	/* frames are 8-byte aligned in the buffer */

	/*  check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /*  round to next bulksize */
	}

	/*  dequeue same priority packet from station tx queue */
	/* 802.1d priority -> AC queue / hw_xmit slot mapping */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /*  not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /*  not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		/* Stop when this frame would overflow the xmit buffer; restore
		 * its first-frame markers since it stays queued for next time.
		 */
		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/*  (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/*  don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		/* Cap the number of descriptors per bulk chunk; reset the
		 * count whenever we cross a bulk-size boundary.
		 */
		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == adapt->HalData->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);
	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/*  remove pkt_offset: a transfer that is an exact bulk-size
		 *  multiple would need a zero-length packet, so shrink it.
		 */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, pxmitbuf);

	/* 3 5. update statisitc */
	/* Strip descriptor and offset overhead so stats count payload bytes */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}
601 
/*
 * Try to transmit @pxmitframe immediately; fall back to enqueueing it
 * when ordering, fw-state, or buffer availability forbids a direct dump.
 *
 * Return
 *	true	dump packet directly (or the frame was consumed on error)
 *	false	enqueue packet
 */
s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	/* Frames already pending for this STA/AC must keep their order */
	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	/* Defer direct transmission while scanning or associating */
	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		goto enqueue;

	/* Direct path: buffer secured, drop the lock before the slow work */
	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	if (res == _SUCCESS) {
		rtw_dump_xframe(adapt, pxmitframe);
	} else {
		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	/* Enqueue under the lock so queue order matches the pending check */
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}
661 
rtw_hal_mgnt_xmit(struct adapter * adapt,struct xmit_frame * pmgntframe)662 s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
663 {
664 	struct xmit_priv *xmitpriv = &adapt->xmitpriv;
665 
666 	rtl88eu_mon_xmit_hook(adapt->pmondev, pmgntframe, xmitpriv->frag_len);
667 	return rtw_dump_xframe(adapt, pmgntframe);
668 }
669