/*
 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
 *
 * Copyright (C) 2011-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"
#include "11n_rxreorder.h"

/* This function dispatches an A-MSDU packet and forwards it to the
 * kernel/upper layer.
 */
static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
					  struct sk_buff *skb)
{
	struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
	int ret;

	if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
		struct sk_buff_head list;
		struct sk_buff *rx_skb;

		__skb_queue_head_init(&list);

		skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
		skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));

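		/* Deaggregate the A-MSDU into a list of individual 802.3
		 * frames and hand each subframe to the normal rx path.
		 */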
		ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
					 priv->wdev->iftype, 0, false);

		while (!skb_queue_empty(&list)) {
			rx_skb = __skb_dequeue(&list);
			ret = mwifiex_recv_packet(priv, rx_skb);
			if (ret == -1)
				dev_err(priv->adapter->dev,
					"Rx of A-MSDU failed");
		}
		return 0;
	}

	return -1;
}

/* This function processes the rx packet and forwards it to the kernel/upper
 * layer.
 */
static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
{
	int ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);

	if (!ret)
		return 0;

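	/* Not an A-MSDU: in AP role forward the frame through the uAP rx
	 * path, otherwise hand it to the normal station rx path.
	 */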
	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
		return mwifiex_handle_uap_rx_forward(priv, payload);

	return mwifiex_process_rx_packet(priv, payload);
}

/*
 * This function dispatches all packets in the Rx reorder table until the
 * start window.
 *
 * There could be holes in the buffer, which are skipped by the function.
 * Since the buffer is linear, the function uses rotation to simulate a
 * circular buffer.
 */
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
					 struct mwifiex_rx_reorder_tbl *tbl,
					 int start_win)
{
	int pkt_to_send, i;
	void *rx_tmp_ptr;
	unsigned long flags;

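	/* Number of slots to flush: the distance from the current window
	 * start to the new one, capped at the window size.  If the new start
	 * is not ahead of the current one (wrap-around), flush the whole
	 * window.
	 */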
	pkt_to_send = (start_win > tbl->start_win) ?
		      min((start_win - tbl->start_win), tbl->win_size) :
		      tbl->win_size;

	for (i = 0; i < pkt_to_send; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		rx_tmp_ptr = NULL;
		if (tbl->rx_reorder_ptr[i]) {
			rx_tmp_ptr = tbl->rx_reorder_ptr[i];
			tbl->rx_reorder_ptr[i] = NULL;
		}
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		if (rx_tmp_ptr)
			mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * a circular buffer
	 */
	for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
		tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
		tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
	}

	tbl->start_win = start_win;
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}

/*
 * This function dispatches all packets in the Rx reorder table until
 * a hole is found.
 *
 * The start window is adjusted automatically when a hole is located.
 * Since the buffer is linear, the function uses rotation to simulate a
 * circular buffer.
 */
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
			      struct mwifiex_rx_reorder_tbl *tbl)
{
	int i, j, xchg;
	void *rx_tmp_ptr;
	unsigned long flags;

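	/* Dispatch consecutive packets from the head of the window until the
	 * first empty slot (a missing frame) is reached.
	 */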
	for (i = 0; i < tbl->win_size; ++i) {
		spin_lock_irqsave(&priv->rx_pkt_lock, flags);
		if (!tbl->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
			break;
		}
		rx_tmp_ptr = tbl->rx_reorder_ptr[i];
		tbl->rx_reorder_ptr[i] = NULL;
		spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
		mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
	}

	spin_lock_irqsave(&priv->rx_pkt_lock, flags);
	/*
	 * We don't have a circular buffer, hence use rotation to simulate
	 * a circular buffer
	 */
	if (i > 0) {
		xchg = tbl->win_size - i;
		for (j = 0; j < xchg; ++j) {
			tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
			tbl->rx_reorder_ptr[i + j] = NULL;
		}
	}
	tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
	spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
}

/*
 * This function deletes the Rx reorder table and frees the memory.
 *
 * The function stops the associated timer and dispatches all the
 * pending packets in the Rx reorder table before deletion.
 */
static void
mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
			     struct mwifiex_rx_reorder_tbl *tbl)
{
	unsigned long flags;
	int start_win;

	if (!tbl)
		return;

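	/* Block new rx processing and, if rx work is already running, wait
	 * for it to finish before tearing the table down.
	 */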
	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
	priv->adapter->rx_locked = true;
	if (priv->adapter->rx_processing) {
		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
		flush_workqueue(priv->adapter->rx_workqueue);
	} else {
		spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
	}

	start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
	mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);

	del_timer_sync(&tbl->timer_context.timer);
	tbl->timer_context.timer_is_set = false;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_del(&tbl->list);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	kfree(tbl->rx_reorder_ptr);
	kfree(tbl);

	spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
	priv->adapter->rx_locked = false;
	spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
}

/*
 * This function returns the pointer to an entry in Rx reordering
 * table which matches the given TA/TID pair.
 */
struct mwifiex_rx_reorder_tbl *
mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long flags;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			return tbl;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return NULL;
}

/* This function retrieves the pointer to an entry in Rx reordering
 * table which matches the given TA and deletes it.
 */
void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
{
	struct mwifiex_rx_reorder_tbl *tbl, *tmp;
	unsigned long flags;

	if (!ta)
		return;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
		if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			mwifiex_del_rx_reorder_entry(priv, tbl);
			spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return;
}

/*
 * This function finds the offset, within the reorder window, of the last
 * packet currently buffered in the Rx reordering table.
 */
static int
mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
{
	struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
	struct mwifiex_private *priv = ctx->priv;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
		if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       flags);
			return i;
		}
	}
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	return -1;
}

/*
 * This function flushes all the packets in Rx reordering table.
 *
 * The function checks if any packets are currently buffered in the
 * table or not. In case there are packets available, it dispatches
 * them and then dumps the Rx reordering table.
 */
static void
mwifiex_flush_data(unsigned long context)
{
	struct reorder_tmr_cnxt *ctx =
		(struct reorder_tmr_cnxt *) context;
	int start_win, seq_num;

	ctx->timer_is_set = false;
	seq_num = mwifiex_11n_find_last_seq_num(ctx);

	if (seq_num < 0)
		return;

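	/* Move the window start just past the last buffered frame and flush
	 * everything up to that point.
	 */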
	dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", seq_num);
	start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
	mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
						 start_win);
}

/*
 * This function creates an entry in Rx reordering table for the
 * given TA/TID.
 *
 * The function also initializes the entry with sequence number, window
 * size as well as initializes the timer.
 *
 * If the received TA/TID pair is already present, all the packets are
 * dispatched and the window start is moved to the SSN.
 */
static void
mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
				  int tid, int win_size, int seq_num)
{
	int i;
	struct mwifiex_rx_reorder_tbl *tbl, *new_node;
	u16 last_seq = 0;
	unsigned long flags;
	struct mwifiex_sta_node *node;

	/*
	 * If we get a TID/TA pair which is already present, dispatch all
	 * the packets and move the window start to the SSN
	 */
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (tbl) {
		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
		return;
	}
	/* if !tbl then create one */
	new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
	if (!new_node)
		return;

	INIT_LIST_HEAD(&new_node->list);
	new_node->tid = tid;
	memcpy(new_node->ta, ta, ETH_ALEN);
	new_node->start_win = seq_num;
	new_node->init_win = seq_num;
	new_node->flags = 0;

	if (mwifiex_queuing_ra_based(priv)) {
		dev_dbg(priv->adapter->dev,
			"info: AP/ADHOC:last_seq=%d start_win=%d\n",
			last_seq, new_node->start_win);
		if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
			node = mwifiex_get_sta_entry(priv, ta);
			if (node)
				last_seq = node->rx_seq[tid];
		}
	} else {
		node = mwifiex_get_sta_entry(priv, ta);
		if (node)
			last_seq = node->rx_seq[tid];
		else
			last_seq = priv->rx_seq[tid];
	}

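	/* If the last sequence number seen from this peer is at or beyond
	 * the requested SSN, start the window just past it so those frames
	 * are not waited for again.
	 */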
	if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
	    last_seq >= new_node->start_win) {
		new_node->start_win = last_seq + 1;
		new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
	}

	new_node->win_size = win_size;

	new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
					GFP_KERNEL);
	if (!new_node->rx_reorder_ptr) {
		kfree((u8 *) new_node);
		dev_err(priv->adapter->dev,
			"%s: failed to alloc reorder_ptr\n", __func__);
		return;
	}

	new_node->timer_context.ptr = new_node;
	new_node->timer_context.priv = priv;
	new_node->timer_context.timer_is_set = false;

	init_timer(&new_node->timer_context.timer);
	new_node->timer_context.timer.function = mwifiex_flush_data;
	new_node->timer_context.timer.data =
			(unsigned long) &new_node->timer_context;

	for (i = 0; i < win_size; ++i)
		new_node->rx_reorder_ptr[i] = NULL;

	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
}

static void
mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl)
{
	u32 min_flush_time;

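	/* Pick the per-slot flush interval based on the BA window size; the
	 * total timeout scales with the number of slots in the window.
	 */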
	if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32)
		min_flush_time = MIN_FLUSH_TIMER_15_MS;
	else
		min_flush_time = MIN_FLUSH_TIMER_MS;

	mod_timer(&tbl->timer_context.timer,
		  jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size));

	tbl->timer_context.timer_is_set = true;
}

/*
 * This function prepares command for adding a BA request.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA request buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
	cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
	memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));

	return 0;
}

/*
 * This function prepares command for adding a BA response.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting add BA response buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
				  struct host_cmd_ds_command *cmd,
				  struct host_cmd_ds_11n_addba_req
				  *cmd_addba_req)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
	struct mwifiex_sta_node *sta_ptr;
	u32 rx_win_size = priv->add_ba_param.rx_win_size;
	u8 tid;
	int win_size;
	uint16_t block_ack_param_set;

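	/* On 11ac-capable hardware in station role, an ADDBA from a TDLS
	 * peer (address differs from the AP BSSID) may use the larger 11ac
	 * AMPDU rx window size if that peer is 11ac enabled.
	 */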
	if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
	    ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
	    priv->adapter->is_hw_11ac_capable &&
	    memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
		sta_ptr = mwifiex_get_sta_entry(priv,
						cmd_addba_req->peer_mac_addr);
		if (!sta_ptr) {
			dev_warn(priv->adapter->dev,
				 "BA setup with unknown TDLS peer %pM!\n",
				 cmd_addba_req->peer_mac_addr);
			return -1;
		}
		if (sta_ptr->is_11ac_enabled)
			rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
	}

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
	cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);

	memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
	       ETH_ALEN);
	add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
	add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
	add_ba_rsp->ssn = cmd_addba_req->ssn;

	block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
	block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;

	/* If we don't support AMSDU inside AMPDU, reset the bit */
	if (!priv->add_ba_param.rx_amsdu ||
	    (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
		block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
	block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
	add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
	win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
					& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
					>> BLOCKACKPARAM_WINSIZE_POS;
	cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);

	mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
					  tid, win_size,
					  le16_to_cpu(cmd_addba_req->ssn));
	return 0;
}

/*
 * This function prepares command for deleting a BA request.
 *
 * Preparation includes -
 *      - Setting command ID and proper size
 *      - Setting del BA request buffer
 *      - Ensuring correct endian-ness
 */
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
	struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;

	cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
	cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
	memcpy(del_ba, data_buf, sizeof(*del_ba));

	return 0;
}

/*
 * This function identifies if Rx reordering is needed for a received packet.
 *
 * In case reordering is required, the function will do the reordering
 * before sending it to kernel.
 *
 * The Rx reorder table is checked first with the received TID/TA pair. If
 * not found, the received packet is dispatched immediately. But if found,
 * the packet is reordered and all the packets in the updated Rx reordering
 * table are dispatched until a hole is found.
 *
 * For sequence numbers less than the starting window, the packet is dropped.
 */
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
				u16 seq_num, u16 tid,
				u8 *ta, u8 pkt_type, void *payload)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	int prev_start_win, start_win, end_win, win_size;
	u16 pkt_index;
	bool init_window_shift = false;
	int ret = 0;

	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
	if (!tbl) {
		if (pkt_type != PKT_TYPE_BAR)
			mwifiex_11n_dispatch_pkt(priv, payload);
		return ret;
	}

	if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
		mwifiex_11n_dispatch_pkt(priv, payload);
		return ret;
	}

	start_win = tbl->start_win;
	prev_start_win = start_win;
	win_size = tbl->win_size;
	end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
		init_window_shift = true;
		tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
	}

	if (tbl->flags & RXREOR_FORCE_NO_DROP) {
		dev_dbg(priv->adapter->dev,
			"RXREOR_FORCE_NO_DROP when HS is activated\n");
		tbl->flags &= ~RXREOR_FORCE_NO_DROP;
	} else if (init_window_shift && seq_num < start_win &&
		   seq_num >= tbl->init_win) {
		dev_dbg(priv->adapter->dev,
			"Sender TID sequence number reset %d->%d for SSN %d\n",
			start_win, seq_num, tbl->init_win);
		tbl->start_win = start_win = seq_num;
		end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
	} else {
		/*
		 * If seq_num is less than the starting window then ignore
		 * and drop the packet
		 */
		if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
			if (seq_num >= ((start_win + TWOPOW11) &
					(MAX_TID_VALUE - 1)) &&
			    seq_num < start_win) {
				ret = -1;
				goto done;
			}
		} else if ((seq_num < start_win) ||
			   (seq_num >= (start_win + TWOPOW11))) {
			ret = -1;
			goto done;
		}
	}

	/*
	 * If this packet is a BAR we adjust seq_num as
	 * WinStart = seq_num
	 */
	if (pkt_type == PKT_TYPE_BAR)
		seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);

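	/* Frame (or BAR) beyond the current window: slide the window forward
	 * so that seq_num becomes the new window end, flushing everything
	 * that falls out of the window.
	 */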
	if (((end_win < start_win) &&
	     (seq_num < start_win) && (seq_num > end_win)) ||
	    ((end_win > start_win) && ((seq_num > end_win) ||
				       (seq_num < start_win)))) {
		end_win = seq_num;
		if (((seq_num - win_size) + 1) >= 0)
			start_win = (end_win - win_size) + 1;
		else
			start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
		mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
	}

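	/* Buffer the frame at its offset from start_win; a frame that lands
	 * in an already occupied slot is treated as a duplicate and
	 * rejected.
	 */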
	if (pkt_type != PKT_TYPE_BAR) {
		if (seq_num >= start_win)
			pkt_index = seq_num - start_win;
		else
			pkt_index = (seq_num + MAX_TID_VALUE) - start_win;

		if (tbl->rx_reorder_ptr[pkt_index]) {
			ret = -1;
			goto done;
		}

		tbl->rx_reorder_ptr[pkt_index] = payload;
	}

	/*
	 * Dispatch all packets sequentially from start_win until a
	 * hole is found and adjust the start_win appropriately
	 */
	mwifiex_11n_scan_and_dispatch(priv, tbl);

done:
	if (!tbl->timer_context.timer_is_set ||
	    prev_start_win != tbl->start_win)
		mwifiex_11n_rxreorder_timer_restart(tbl);
	return ret;
}

/*
 * This function deletes an entry for a given TID/TA pair.
 *
 * The TID/TA are taken from del BA event body.
 */
void
mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
		   u8 type, int initiator)
{
	struct mwifiex_rx_reorder_tbl *tbl;
	struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
	u8 cleanup_rx_reorder_tbl;
	unsigned long flags;

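	/* Based on whether the DELBA was received or sent and on who
	 * initiated the block ack stream, decide whether to tear down the
	 * Rx reorder table (recipient side) or the Tx BA stream table
	 * (originator side).
	 */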
	if (type == TYPE_DELBA_RECEIVE)
		cleanup_rx_reorder_tbl = (initiator) ? true : false;
	else
		cleanup_rx_reorder_tbl = (initiator) ? false : true;

	dev_dbg(priv->adapter->dev, "event: DELBA: %pM tid=%d initiator=%d\n",
		peer_mac, tid, initiator);

	if (cleanup_rx_reorder_tbl) {
		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, peer_mac);
		if (!tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, TA not found in table\n");
			return;
		}
		mwifiex_del_rx_reorder_entry(priv, tbl);
	} else {
		ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
		if (!ptx_tbl) {
			dev_dbg(priv->adapter->dev,
				"event: TID, RA not found in table\n");
			return;
		}

		spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
		mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
		spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
	}
}

/*
 * This function handles the command response of an add BA response.
 *
 * Handling includes changing the header fields into CPU format and
 * creating the stream, provided the add BA is accepted.
 */
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
			       struct host_cmd_ds_command *resp)
{
	struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
	int tid, win_size;
	struct mwifiex_rx_reorder_tbl *tbl;
	uint16_t block_ack_param_set;

	block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);

	tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
		>> BLOCKACKPARAM_TID_POS;
	/*
	 * Check if we had rejected the ADDBA, if yes then do not create
	 * the stream
	 */
	if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
		dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d\n",
			add_ba_rsp->peer_mac_addr, tid);

		tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
						     add_ba_rsp->peer_mac_addr);
		if (tbl)
			mwifiex_del_rx_reorder_entry(priv, tbl);

		return 0;
	}

	win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
		    >> BLOCKACKPARAM_WINSIZE_POS;

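	/* Record on the reorder table whether A-MSDU inside AMPDU was
	 * negotiated and is allowed by our own aggregation policy for this
	 * TID.
	 */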
	tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
					     add_ba_rsp->peer_mac_addr);
	if (tbl) {
		if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
		    priv->add_ba_param.rx_amsdu &&
		    (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
			tbl->amsdu = true;
		else
			tbl->amsdu = false;
	}

	dev_dbg(priv->adapter->dev,
		"cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
		add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);

	return 0;
}

/*
 * This function handles BA stream timeout event by preparing and sending
 * a command to the firmware.
 */
void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
				   struct host_cmd_ds_11n_batimeout *event)
{
	struct host_cmd_ds_11n_delba delba;

	memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
	memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);

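	/* Build the DELBA with the timed-out TID and the originator flag
	 * from the event, then ask the firmware to tear the stream down.
	 */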
	delba.del_ba_param_set |=
		cpu_to_le16((u16) event->tid << DELBA_TID_POS);
	delba.del_ba_param_set |= cpu_to_le16(
		(u16) event->origninator << DELBA_INITIATOR_POS);
	delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
	mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
}

/*
 * This function cleans up the Rx reorder table by deleting all the entries
 * and re-initializing.
 */
void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
{
	struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
	unsigned long flags;

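	/* The list lock is dropped around each delete because
	 * mwifiex_del_rx_reorder_entry takes it again and may block while
	 * flushing pending rx work.
	 */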
	spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	list_for_each_entry_safe(del_tbl_ptr, tmp_node,
				 &priv->rx_reorder_tbl_ptr, list) {
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
		mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
	}
	INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
	spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);

	mwifiex_reset_11n_rx_seq_num(priv);
}

/*
 * This function updates all rx_reorder_tbl's flags.
 */
void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
{
	struct mwifiex_private *priv;
	struct mwifiex_rx_reorder_tbl *tbl;
	unsigned long lock_flags;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
		priv = adapter->priv[i];
		if (!priv)
			continue;

		spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
		if (list_empty(&priv->rx_reorder_tbl_ptr)) {
			spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
					       lock_flags);
			continue;
		}

		list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
			tbl->flags = flags;
		spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
	}

	return;
}