/*
 * NXP Wireless LAN device driver: WMM
 *
 * Copyright 2011-2020 NXP
 *
 * This software file (the "File") is distributed by NXP
 * under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"


/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX		512


#define WMM_QUEUED_PACKET_LOWER_LIMIT	180

#define WMM_QUEUED_PACKET_UPPER_LIMIT	200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);

/* This table inverts the tos_to_tid operation to get a priority
 * which is in sequential order, and can be compared.
 * Use this to compare the priority of two different TIDs.
 */
const u8 tos_to_tid_inv[] = {
	0x02,	/* from tos_to_tid[2] = 0 */
	0x00,	/* from tos_to_tid[0] = 1 */
	0x01,	/* from tos_to_tid[1] = 2 */
	0x03,
	0x04,
	0x05,
	0x06,
	0x07
};
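
/* Worked example: tos_to_tid_inv[0] = 2 while tos_to_tid_inv[1] = 0, so
 * TID 0 compares higher than TID 1 even though 0 < 1 numerically. This is
 * the comparison used below for wmm.highest_queued_prio bookkeeping.
 */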

/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
	0x00, 0x50, 0xf2, 0x02,
	0x00, 0x01, 0x00
};
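
/* Byte layout above: vendor-specific element ID, length 7, the Microsoft
 * OUI 00:50:f2, OUI type 0x02 (WMM), OUI subtype 0x00 (information
 * element), version 0x01, and a zeroed QoS info field.
 */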

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
	WMM_AC_BK,
	WMM_AC_VI,
	WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
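
/* Each WMM AC owns a pair of TIDs, indexed by the mwifiex_wmm_ac_e enum:
 * AC_BK -> {1, 2}, AC_BE -> {0, 3}, AC_VI -> {4, 5}, AC_VO -> {6, 7}.
 * mwifiex_wmm_downgrade_tid() below picks one of the pair via "tid % 2".
 */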

/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates an RA (receiver address) list node.
 *
 * The function also initializes the node with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is
 * initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u64 ns;
	/* set up ba_packet_threshold here: a random number in
	 * [BA_SETUP_PACKET_OFFSET,
	 * BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1]
	 */
	ns = ktime_get_ns();
	ns += (ns >> 32) + (ns >> 16);

	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
}
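
/* Sketch of the math above, assuming the header values
 * BA_SETUP_PACKET_OFFSET = 16 and BA_SETUP_MAX_PACKET_THRESHOLD = 16:
 * folding the nanosecond timestamp into a u8 and taking it modulo 16
 * yields 0..15, so ba_packet_thr lands in [16, 31] -- roughly the
 * "between 16 and 32" range described above.
 */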

/*
 * This function allocates and adds an RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		mwifiex_dbg(adapter, INFO,
			    "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		ra_list->ba_status = BA_SETUP_NONE;
		ra_list->amsdu_in_ampdu = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_is_tdls_link_setup
			    (mwifiex_get_tdls_link_status(priv, ra))) {
				ra_list->tdls_link = true;
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			spin_lock_bh(&priv->sta_list_spinlock);
			node = mwifiex_get_sta_entry(priv, ra);
			if (node)
				ra_list->tx_paused = node->tx_pause;
			ra_list->is_11n_enabled =
				mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
			spin_unlock_bh(&priv->sta_list_spinlock);
		}

		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
			    ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
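
/* Worked example for the first loop above, using the default ordering
 * queue_priority = {VO, VI, BE, BK}: i = 0 writes tos_to_tid[7] = 7 and
 * tos_to_tid[6] = 6 (the VO pair); i = 1 writes slots 5/4 with the VI
 * pair {5, 4}; i = 2 writes tos_to_tid[3] = 3 and tos_to_tid[2] = 0 (the
 * BE pair); i = 3 writes tos_to_tid[1] = 2 and tos_to_tid[0] = 1 (the BK
 * pair), reproducing the static tos_to_tid table.
 */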

/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM Parameter IE: version=%d,\t"
		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
		    wmm_ie->version, wmm_ie->qos_info_bitmap &
		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		    wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		mwifiex_dbg(priv->adapter, INFO,
			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			    cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort in ascending order of avg_back_off; on a tie, keep
	 * the higher-numbered AC first
	 */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}
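
	/* Illustration with hypothetical IE values: if the computed
	 * avg_back_off per AC were {BK: 9, BE: 5, VI: 3, VO: 2}, the sort
	 * would yield queue_priority = {VO, VI, BE, BK}, i.e. the default
	 * ordering.
	 */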

	mwifiex_wmm_queue_priorities_tid(priv);
}
296
297 /*
298 * This function evaluates whether or not an AC is to be downgraded.
299 *
300 * In case the AC is not enabled, the highest AC is returned that is
301 * enabled and does not require admission control.
302 */
303 static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private * priv,enum mwifiex_wmm_ac_e eval_ac)304 mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
305 enum mwifiex_wmm_ac_e eval_ac)
306 {
307 int down_ac;
308 enum mwifiex_wmm_ac_e ret_ac;
309 struct mwifiex_wmm_ac_status *ac_status;
310
311 ac_status = &priv->wmm.ac_status[eval_ac];
312
313 if (!ac_status->disabled)
314 /* Okay to use this AC, its enabled */
315 return eval_ac;
316
317 /* Setup a default return value of the lowest priority */
318 ret_ac = WMM_AC_BK;
319
320 /*
321 * Find the highest AC that is enabled and does not require
322 * admission control. The spec disallows downgrading to an AC,
323 * which is enabled due to a completed admission control.
324 * Unadmitted traffic is not to be sent on an AC with admitted
325 * traffic.
326 */
327 for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
328 ac_status = &priv->wmm.ac_status[down_ac];
329
330 if (!ac_status->disabled && !ac_status->flow_required)
331 /* AC is enabled and does not require admission
332 control */
333 ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
334 }
335
336 return ret_ac;
337 }

/*
 * This function downgrades the WMM priority queues.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
		    "BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			mwifiex_dbg(priv->adapter, INFO,
				    "info: WMM: AC PRIO %d maps to %d\n",
				    ac_val,
				    priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

/*
 * This function converts the IP TOS field to a WMM AC
 * queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
		WMM_AC_BE,
		WMM_AC_BK,
		WMM_AC_BK,
		WMM_AC_BE,
		WMM_AC_VI,
		WMM_AC_VI,
		WMM_AC_VO,
		WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}

/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * AC is disabled (due to the call admission control (ACM) bit). The
 * mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index to the tid array; picking from the array will be
	 * taken care of by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}
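
/* Worked example: tid = 5 maps to WMM_AC_VI. If VI is ACM-gated and the
 * downgrade table points VI at WMM_AC_BE, the packet is re-tagged with
 * new_tid = ac_to_tid[WMM_AC_BE][5 % 2] = 3.
 */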

/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					= priv->aggr_prio_tbl[7].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
{
	struct mwifiex_private *priv;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
		priv = adapter->priv[i];
		if (!priv)
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (!skb_queue_empty(&priv->bypass_txq))
			return false;
	}

	return true;
}

/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];
		if (!priv)
			continue;
		if (!priv->port_open &&
		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callback handlers are called with
 * status failure, after the packets are dequeued, to ensure proper
 * cleanup. The RA list node itself is not freed here.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
		skb_unlink(skb, &ra_list->skb_head);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
	}
}

/*
 * This function deletes all packets in an RA list.
 *
 * The packets in each node of the RA list are deleted individually;
 * the nodes themselves stay on the list.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv,
					       &priv->wmm.tid_tbl_ptr[i].ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function deletes all RA list nodes from all TID tables.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		mwifiex_dbg(priv->adapter, INFO,
			    "info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

static int mwifiex_free_ack_frame(int id, void *p, void *data)
{
	pr_warn("Have pending ack frames!\n");
	kfree_skb(p);
	return 0;
}

/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 * - All packets in RA lists
 * - All entries in Rx reorder table
 * - All entries in Tx BA stream table
 * - MPA buffer (if required)
 * - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
		skb_unlink(skb, &priv->tdls_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}

	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
		skb_unlink(skb, &priv->bypass_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}
	atomic_set(&priv->adapter->bypass_tx_pending, 0);

	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
	idr_destroy(&priv->ack_status_frames);
}

/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
				    u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
		if (ra_list && ra_list->tx_paused != tx_pause) {
			pkt_cnt += ra_list->total_pkt_count;
			ra_list->tx_paused = tx_pause;
			if (tx_pause)
				priv->wmm.pkts_paused[i] +=
					ra_list->total_pkt_count;
			else
				priv->wmm.pkts_paused[i] -=
					ra_list->total_pkt_count;
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/* This function updates the tx_pause state of all non-TDLS peer RA lists
 * during TDLS channel switching.
 */
void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
					       u8 *mac, u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
				    list) {
			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
				continue;

			if (ra_list->tx_paused != tx_pause) {
				pkt_cnt += ra_list->total_pkt_count;
				ra_list->tx_paused = tx_pause;
				if (tx_pause)
					priv->wmm.pkts_paused[i] +=
						ra_list->total_pkt_count;
				else
					priv->wmm.pkts_paused[i] -=
						ra_list->total_pkt_count;
			}
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

/*
 * This function deletes the RA list nodes for a given MAC address,
 * for all TIDs. It also decrements the TX pending count accordingly.
 */
void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);

		if (!ra_list)
			continue;
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
		if (ra_list->tx_paused)
			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
		else
			atomic_sub(ra_list->total_pkt_count,
				   &priv->wmm.tx_pkts_queued);
		list_del(&ra_list->list);
		kfree(ra_list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

/*
 * This function adds a packet to the bypass TX queue.
 * This is a special TX queue for packets which can be sent even when
 * port_open is false.
 */
void
mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
				   struct sk_buff *skb)
{
	skb_queue_tail(&priv->bypass_txq, skb);
}

/*
 * This function adds a packet to the WMM queue.
 *
 * In disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with status failure.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			mwifiex_dbg(adapter, DATA,
				    "TDLS setup packet for %pM.\t"
				    "Don't block\n", ra);
		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In the infrastructure case the list has already been created
	 * during association, so get_queue_raptr need not be called;
	 * there is only one RA pointer per TID in that case.
	 */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
		case TDLS_CHAN_SWITCHING:
		case TDLS_IN_BASE_CHAN:
		case TDLS_IN_OFF_CHAN:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			ra_list = list_first_entry_or_null(&list_head,
					struct mwifiex_ra_list_tbl, list);
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
			eth_broadcast_addr(ra);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	if (ra_list->tx_paused)
		priv->wmm.pkts_paused[tid_down]++;
	else
		atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 * - AC Queue status TLVs
 * - Current WMM Parameter IE TLV
 * - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		    resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "QSTATUS TLV: %d, %d, %d\n",
				    tlv_wmm_qstatus->queue_index,
				    tlv_wmm_qstatus->flow_required,
				    tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 * and set up the IEEE IE type and length byte fields
			 */

			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr + 2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "WMM Parameter Set Count: %d\n",
				    wmm_param_ie->qos_info_bitmap & mask);

			if (wmm_param_ie->vend_hdr.len + 2 >
				sizeof(struct ieee_types_wmm_parameter))
				break;

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		    wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
		    "%d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}
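
/* Worked example: a packet that sat 123 ms in the driver queues is
 * clamped to min(123, drv_pkt_delay_max) and then halved, so the
 * firmware receives 61 in 2 ms units, i.e. roughly 122 ms.
 */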

/*
 * This function retrieves the highest priority RA list table pointer.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

try_again:
			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
			     !priv_tmp->port_open) ||
			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
				continue;

			if (adapter->if_ops.is_port_ready &&
			    !adapter->if_ops.is_port_ready(priv_tmp))
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!ptr->tx_paused &&
					    !skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
			}

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   HIGH_PRIO_TID);
				/* Iterate current private once more, since
				 * there still exist packets in data queue
				 */
				goto try_again;
			} else
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   NO_PKT_PRIO_TID);
		}
	}

	return NULL;

found:
	/* holds ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

/* This function rotates the RA and BSS lists so packets are picked
 * round robin.
 *
 * After a packet is successfully transmitted, rotate the RA list, so that
 * the RA next to the one just served comes first in the list. This way we
 * pick the RAs in a round robin fashion. The same applies to BSS nodes of
 * equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];

	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * the curr bss node. imagine the list to stay fixed while head is
	 * moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
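
/* Illustration of the list_move() trick: with a circular ring
 * head -> A -> B -> C and bss_prio_cur = B, moving head to just after B
 * makes the next traversal start at C, so the node after the one that
 * just transmitted is served first.
 */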

/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}
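
/* Example of the walk above, assuming MIN_NUM_AMSDU is defined as 2 in
 * the headers: two queued 400-byte MSDUs against a 4k A-MSDU limit make
 * the function return true, while a single large packet never does.
 */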

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	mwifiex_dbg(adapter, DATA,
		    "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				 sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Transport busy, queue the packet back */
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			      __releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (adapter->data_sent || adapter->tx_lock_flag) {
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		skb_queue_tail(&adapter->tx_data_q, skb);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		atomic_inc(&adapter->tx_queued);
		return;
	}

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len =
		((skb_next) ? skb_next->len +
		 sizeof(struct txpd) : 0);
	if (adapter->iface_type == MWIFIEX_USB) {
		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
						   skb, &tx_param);
	} else {
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		break;
	case -1:
		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		spin_lock_bh(&priv->wmm.ra_list_spinlock);
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index);
		/* ra_list_spinlock has been freed in
		 * mwifiex_send_processed_packet()
		 */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
	    ptr->ba_status ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    ptr->ba_status &&
		    ptr->amsdu_in_ampdu &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	}
	return 0;
}

void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
{
	struct mwifiex_tx_param tx_param;
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_private *priv;
	int i;

	if (adapter->data_sent || adapter->tx_lock_flag)
		return;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];

		if (!priv)
			continue;

		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;

		if (skb_queue_empty(&priv->bypass_txq))
			continue;

		skb = skb_dequeue(&priv->bypass_txq);
		tx_info = MWIFIEX_SKB_TXCB(skb);

		/* no aggregation for bypass packets */
		tx_param.next_pkt_len = 0;

		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
			skb_queue_head(&priv->bypass_txq, skb);
			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		} else {
			atomic_dec(&adapter->bypass_tx_pending);
		}
	}
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		if (mwifiex_dequeue_tx_packet(adapter))
			break;
		if (adapter->iface_type != MWIFIEX_SDIO) {
			if (adapter->data_sent ||
			    adapter->tx_lock_flag)
				break;
		} else {
			if (atomic_read(&adapter->tx_queued) >=
			    MWIFIEX_MAX_PKTS_TXQ)
				break;
		}
	} while (!mwifiex_wmm_lists_empty(adapter));
}