/******************************************************************************
 *
 * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 *
 ******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <mon.h>
#include <wifi.h>
#include <osdep_intf.h>
#include <usb_ops_linux.h>
#include <rtl8188e_hal.h>

s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
{
	struct xmit_priv	*pxmitpriv = &adapt->xmitpriv;

	tasklet_init(&pxmitpriv->xmit_tasklet,
		     (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
		     (unsigned long)adapt);
	return _SUCCESS;
}

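/*
 * Return 1 when the payload plus TX descriptor would be an exact multiple of
 * the USB bulk-out size. In that case the caller keeps the PACKET_OFFSET_SZ
 * padding in front of the descriptor, so the bulk transfer never ends exactly
 * on a bulk-size boundary (which would otherwise need a trailing zero-length
 * packet).
 */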
static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
{
	u8 set_tx_desc_offset;
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);

	set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;

	return set_tx_desc_offset;
}

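/*
 * XOR the first 16 little-endian 16-bit words of the descriptor (32 bytes)
 * and store the result in the low half of txdw7. The checksum field itself
 * is cleared first, so it takes part in the calculation as zero.
 */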
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
	u16	*usptr = (u16 *)ptxdesc;
	u32 count = 16;		/*  (32 bytes / 2 bytes per XOR) => 16 times */
	u32 index;
	u16 checksum = 0;

	/* Clear first */
	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);

	for (index = 0; index < count; index++)
		checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
	ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
}

/*  Description: In normal chip, we should send some packets to Hw which will be used by Fw */
/*			in FW LPS mode. The function is to fill the Tx descriptor of these packets, then */
/*			Fw can tell Hw to send these packets directly. */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/*  Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /*  Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /*  Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed to a wrong value by Hw. */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /*  Hw set sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /*  BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */

	/*  USB interface drops the packet if the checksum of the descriptor isn't correct. */
	/*  This checksum lets hardware recover from bulk out errors (e.g. cancelled URB, bulk out error). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}

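/*
 * For hardware-encrypted frames (encryption enabled and not done in software),
 * program the SEC_TYPE bits of txdw1 for the cipher in use and force the
 * maximum AMPDU density (0x7) in txdw2.
 */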
static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
{
	if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
		switch (pattrib->encrypt) {
		/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
		case _WEP40_:
		case _WEP104_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _TKIP_:
		case _TKIP_WTMIC_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _AES_:
			ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _NO_PRIVACY_:
		default:
			break;
		}
	}
}

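/*
 * Select the protection mode in txdw4: RTS/CTS or CTS-to-self according to
 * vcs_mode. When protection is used, also enable hardware RTS and, for HT,
 * set the RTS bandwidth (bit 27) and RTS subcarrier selection (bits 28-29)
 * from the primary channel offset.
 */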
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}
	if (pattrib->vcs_mode) {
		*pdw |= cpu_to_le32(HW_RTS_EN);
		/*  Set RTS BW */
		if (pattrib->ht_en) {
			*pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;

			if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
				*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
				*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
				*pdw |= 0;
			else
				*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		}
	}
}

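/*
 * Set the data transmission bandwidth (bit 25 = 40 MHz) and the data
 * subcarrier selection (DATA_SC) bits from the primary channel offset for
 * HT transmissions.
 */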
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (pattrib->ht_en) {
		*pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;

		if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
			*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
			*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
			*pdw |= 0;
		else
			*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
	}
}

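/*
 * Fill the hardware TX descriptor in front of the frame at pmem: packet size,
 * descriptor offset, queue selection, rate ID, security, sequence number and
 * rate-control fields. Returns 1 ("pull") when the descriptor was moved
 * forward by PACKET_OFFSET_SZ because the extra padding is not needed to keep
 * the bulk-out transfer off a bulk-size boundary.
 */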
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int	pull = 0;
	uint	qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter		*adapt = pxmitframe->padapter;
	struct pkt_attrib	*pattrib = &pxmitframe->attrib;
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct tx_desc	*ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv	*pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
	int	bmcst = IS_MCAST(pattrib->ra);

	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/*  pkt_offset, unit:8 bytes padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16 , offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Non EAP & ARP & DHCP type data packet */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS  Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
			}
			data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/*  EAP data packet and ARP packet and DHCP. */
			/*  Use the 1M data rate to send the EAP/ARP packet. */
			/*  This will maybe make the handshake smooth. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/*  DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel&0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/*  2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/*  (1) The sequence number of each non-Qos frame / broadcast / multicast / */
	/*  mgnt frame should be controlled by Hw because Fw will also send null data */
	/*  which we cannot control when Fw LPS enable. */
	/*  --> default enable non-Qos data sequence number. 2010.06.23. by tynli. */
	/*  (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
	/*  (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
	/*  2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /*  Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);	/*  Hw set sequence number */
	}

	rtl88eu_dm_set_tx_ant_by_tx_info(&haldata->odmpriv, pmem,
					 pattrib->mac_id);

	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}

/* for non-agg data frame or management frame */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct security_priv *psecuritypriv = &adapt->securitypriv;

	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);
	mem_addr = pxmitframe->buf_addr;

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));

	for (t = 0; t < pattrib->nr_frags; t++) {
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));

			sz = pxmitpriv->frag_len;
			sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}
		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));

		mem_addr += w_sz;

		mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}

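/*
 * Estimated length the coalesced frame occupies in the xmit buffer:
 * 802.11 header + IV + LLC/SNAP + ethertype + payload, plus the ICV when
 * encryption is done in software, plus 8 bytes of TKIP MIC.
 */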
static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
{
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

	u32 len = 0;

	/*  fragmentation is not considered here */
	len = pattrib->hdrlen + pattrib->iv_len +
		SNAP_SIZE + sizeof(u16) +
		pattrib->pktlen +
		((pattrib->bswenc) ? pattrib->icv_len : 0);

	if (pattrib->encrypt == _TKIP_)
		len += 8;

	return len;
}

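/*
 * USB TX aggregation: dequeue one frame as the anchor of the xmit buffer,
 * then append further frames of the same priority and destination from that
 * station's pending queue on 8-byte boundaries. Aggregation stops at
 * MAX_XMITBUF_SZ, at UsbTxAggDescNum descriptors per bulk transfer, or at
 * MAX_TX_AGG_PACKET_NUMBER frames; the whole buffer is then written out with
 * a single usb_write_port() call.
 */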
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;

	/*  aggregate variables */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/*  next pkt address */
	u32 pbuf_tail;	/*  last pkt tail */
	u32 len;	/*  packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/*  dump frame variable */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	/*  check that the xmit buffer is ok */
	if (pxmitbuf == NULL) {
		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (pxmitbuf == NULL)
			return false;
	}

	/* 3 1. pick up first frame */
	do {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
		if (pxmitframe == NULL) {
			/*  no more xmit frames, release the xmit buffer */
			rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
			return false;
		}

		pxmitframe->pxmitbuf = pxmitbuf;
		pxmitframe->buf_addr = pxmitbuf->pbuf;
		pxmitbuf->priv_data = pxmitframe;

		pxmitframe->agg_num = 1; /*  a freshly allocated xmitframe starts with agg_num = 1 */
		pxmitframe->pkt_offset = 1; /*  first frame of aggregation, reserve offset */

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		break;
	} while (1);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);

	/*  check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /*  round to next bulksize */
	}

	/*  dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
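	/*
	 * Map the user priority (TID) to the per-station service queue and
	 * hw_xmit entry: 1/2 -> BK, 4/5 -> VI, 6/7 -> VO, 0/3 and others ->
	 * BE (the usual 802.11e UP-to-AC mapping).
	 */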
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /*  not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /*  not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/*  (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/*  don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);
	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/*  remove pkt_offset */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statistics */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}

static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res = _SUCCESS;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
	if (res == _SUCCESS)
		rtw_dump_xframe(adapt, pxmitframe);
	else
		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
	return res;
}

/*
 * Return
 *	true	dump packet directly
 *	false	enqueue packet
 */
static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		goto enqueue;

	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}

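/*
 * Hand the management frame to the monitor interface hook (if one is active)
 * and then write it out immediately through the non-aggregated dump path.
 */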
s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
{
	struct xmit_priv *xmitpriv = &adapt->xmitpriv;

	rtl88eu_mon_xmit_hook(adapt->pmondev, pmgntframe, xmitpriv->frag_len);
	return rtw_dump_xframe(adapt, pmgntframe);
}

/*
 * Return
 *	true	packet dumped directly
 *	false	temporarily cannot transmit packets to hardware
 */
s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	return pre_xmitframe(adapt, pxmitframe);
}