// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */

#define _RTL8188E_XMIT_C_
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/wifi.h"
#include "../include/osdep_intf.h"
#include "../include/usb_ops.h"
#include "../include/rtl8188e_hal.h"

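/* Initialize the HAL transmit side: register rtl8188eu_xmit_tasklet as this adapter's xmit tasklet. */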
s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
{
	struct xmit_priv	*pxmitpriv = &adapt->xmitpriv;

	tasklet_init(&pxmitpriv->xmit_tasklet,
		     rtl8188eu_xmit_tasklet,
		     (unsigned long)adapt);
	return _SUCCESS;
}

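/* Returns 1 when the payload plus TX descriptor is an exact multiple of the USB bulk-out size, i.e. the transfer would end on a bulk boundary; the caller uses this to decide whether to keep the reserved 8-byte packet offset. */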
static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
{
	u8 set_tx_desc_offset;
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);

	set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;

	return set_tx_desc_offset;
}

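/* XOR the first 32 bytes of the descriptor 16 bits at a time and store the result in the low 16 bits of txdw7; the USB interface drops descriptors whose checksum does not match. */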
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
	u16	*usptr = (u16 *)ptxdesc;
	u32 count = 16;		/*  (32 bytes / 2 bytes per XOR) => 16 times */
	u32 index;
	u16 checksum = 0;

	/* Clear first */
	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);

	for (index = 0; index < count; index++)
		checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
	ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
}

/*  Description: On the normal chip we must hand some packets to the hardware that the firmware */
/*			reuses in FW LPS mode. This function fills the TX descriptor of those packets so that */
/*			the firmware can tell the hardware to send them directly. */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/*  Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen & 0x0000ffff); /*  Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT << QSEL_SHT) & 0x00001f00); /*  Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed to a wrong value by the hardware. */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /*  Hw set sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /*  BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */

	/*  The USB interface drops the packet if the descriptor checksum is not correct. */
	/*  This checksum lets the hardware recover from bulk-out errors (e.g. a cancelled URB or bulk-out error). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}

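/* Program the SEC_TYPE bits of txdw1 (and the AMPDU density in txdw2) according to the frame's hardware encryption algorithm; software-encrypted frames are left untouched. */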
static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
{
	if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
		switch (pattrib->encrypt) {
		/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
		case _WEP40_:
		case _WEP104_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _TKIP_:
		case _TKIP_WTMIC_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01 << SEC_TYPE_SHT) & 0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _AES_:
			ptxdesc->txdw1 |= cpu_to_le32((0x03 << SEC_TYPE_SHT) & 0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _NO_PRIVACY_:
		default:
			break;
		}
	}
}

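/* Set the RTS/CTS protection bits in the given descriptor word and, for HT frames, the RTS bandwidth and secondary-channel selection. */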
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}
	if (pattrib->vcs_mode) {
		*pdw |= cpu_to_le32(HW_RTS_EN);
		/*  Set RTS BW */
		if (pattrib->ht_en) {
			*pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;

			if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
				*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
				*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
				*pdw |= 0;
			else
				*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		}
	}
}

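/* For HT frames, set the data bandwidth bit and the data subcarrier (secondary channel) selection in the descriptor word. */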
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (pattrib->ht_en) {
		*pdw |= (pattrib->bwmode & HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;

		if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
			*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
			*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
			*pdw |= 0;
		else
			*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
	}
}

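/* Build the full TX descriptor in front of the frame at pmem. Returns 1 ("pull") when the descriptor was moved forward by PACKET_OFFSET_SZ so the reserved packet offset is no longer used, 0 otherwise. */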
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int	pull = 0;
	uint	qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter		*adapt = pxmitframe->padapter;
	struct pkt_attrib	*pattrib = &pxmitframe->attrib;
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct tx_desc	*ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv	*pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info	*pmlmeinfo = &pmlmeext->mlmext_info;

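	/* For non-aggregated frames that would not end exactly on a USB bulk-out boundary, move the descriptor forward by PACKET_OFFSET_SZ and drop the reserved packet offset. */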
	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem + PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */

	if (is_multicast_ether_addr(pattrib->ra))
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/*  pkt_offset, unit: 8 bytes padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16 , offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Normal data packet (not EAP, ARP or DHCP) */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS  Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
			}
			data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/*  EAP, ARP or DHCP packet. */
			/*  Use the 1M data rate to send the EAP/ARP packet. */
			/*  This may make the handshake smoother. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/*  DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag & 0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else if (((pxmitframe->frame_tag & 0x0f) == MP_FRAMETAG) &&
		   (adapt->registrypriv.mp_mode == 1)) {
		fill_txdesc_for_mp(adapt, ptxdesc);
	} else {
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/*  2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/*  (1) The sequence number of each non-QoS frame / broadcast / multicast / */
	/*  mgnt frame should be controlled by the hardware, because the firmware also sends null data */
	/*  frames, which we cannot control, when FW LPS is enabled. */
	/*  --> enable hardware sequence numbers for non-QoS data by default. 2010.06.23. by tynli. */
	/*  (2) Enable HW SEQ control for beacon packets, because we use the hardware beacon. */
	/*  (3) Use HW QoS SEQ to control the seq num of Ext port non-QoS packets. */
	/*  2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /*  Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);	/*  Hw set sequence number */
	}

	ODM_SetTxAntByTxInfo_88E(&haldata->odmpriv, pmem, pattrib->mac_id);

	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	return pull;
}

/* for non-agg data frame or management frame */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct security_priv *psecuritypriv = &adapt->securitypriv;

	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);

	mem_addr = pxmitframe->buf_addr;

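	/* Walk every fragment of the frame: prepend a TX descriptor, write the fragment to the USB bulk-out FIFO, and advance to the next 4-byte-aligned position. */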
	for (t = 0; t < pattrib->nr_frags; t++) {
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			sz = pxmitpriv->frag_len;
			sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}
		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = rtw_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		mem_addr += w_sz;

		mem_addr = (u8 *)RND4(((size_t)(mem_addr)));
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}

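/* Worst-case buffer length this frame will occupy: 802.11 header, IV, LLC/SNAP header, payload, the ICV when software encryption is used, plus 8 extra bytes for the TKIP MIC. */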
static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
{
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

	u32 len = 0;

	/*  fragmentation is not considered */
	len = pattrib->hdrlen + pattrib->iv_len +
		SNAP_SIZE + sizeof(u16) +
		pattrib->pktlen +
		((pattrib->bswenc) ? pattrib->icv_len : 0);

	if (pattrib->encrypt == _TKIP_)
		len += 8;

	return len;
}

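/* Aggregate as many same-priority frames as possible for one station into a single xmit buffer and hand the whole buffer to the USB bulk-out pipe in one transfer. Returns true when a buffer was submitted, false when there was nothing to send or no buffer was available. */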
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;

	/*  aggregate variable */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;
	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/*  next pkt address */
	u32 pbuf_tail;	/*  last pkt tail */
	u32 len;	/*  packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/*  dump frame variable */
	u32 ff_hwaddr;

	/*  check xmitbuffer is ok */
	if (!pxmitbuf) {
		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (!pxmitbuf)
			return false;
	}

	/* 3 1. pick up first frame */
	do {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
		if (!pxmitframe) {
			/*  no more xmit frame, release xmit buffer */
			rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
			return false;
		}

		pxmitframe->pxmitbuf = pxmitbuf;
		pxmitframe->buf_addr = pxmitbuf->pbuf;
		pxmitbuf->priv_data = pxmitframe;

		pxmitframe->agg_num = 1; /*  alloc xmitframe should assign to 1. */
		pxmitframe->pkt_offset = 1; /*  first frame of aggregation, reserve offset */

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		break;
	} while (1);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = _RND8(pbuf_tail);

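	/* pbuf_tail tracks the end of the data written into the buffer so far; pbuf is the 8-byte-aligned offset at which the next aggregated frame will be placed. */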
	/*  check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /*  round to next bulksize */
	}

	/*  dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &psta->sta_xmitpriv.bk_q;
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &psta->sta_xmitpriv.vi_q;
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &psta->sta_xmitpriv.vo_q;
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &psta->sta_xmitpriv.be_q;
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

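	/* Pack further frames from the station queue into the same buffer until the buffer is full, the per-URB descriptor limit is hit, or MAX_TX_AGG_PACKET_NUMBER frames have been aggregated. */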
	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /*  not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /*  not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);

		if (_RND8(pbuf + len) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/*  (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/*  don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = _RND8(pbuf_tail);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);

	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);

	/* 3 3. update first frame txdesc */
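	/* If the aggregate would end exactly on a bulk-out boundary, drop the reserved 8-byte packet offset of the first frame so the transfer does not end with a zero-length packet. */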
	if ((pbuf_tail % bulksize) == 0) {
		/*  remove pkt_offset */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	rtw_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statistics */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}

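/* Coalesce the frame into its xmit buffer and, on success, hand it to rtw_dump_xframe() for immediate transmission. */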
static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res = _SUCCESS;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
	if (res == _SUCCESS)
		rtw_dump_xframe(adapt, pxmitframe);
	else
		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
	return res;
}

/*
 * Return
 *	true	dump packet directly
 *	false	enqueue packet
 */
static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

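	/* Try the direct path first: if no frame of the same AC is already pending for this station, we are not scanning or joining, and an xmit buffer is available, transmit immediately; otherwise queue the frame for later. */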
	spin_lock_bh(&pxmitpriv->lock);

	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING))
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		goto enqueue;

	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}

s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
{
	return rtw_dump_xframe(adapt, pmgntframe);
}

/*
 * Return
 *	true	dump packet directly ok
 *	false	temporarily cannot transmit packets to the hardware
 */
s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	return pre_xmitframe(adapt, pxmitframe);
}