// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright (C) 2019 Intel Corporation
 * Copyright (C) 2023 Intel Corporation
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-agn-hw.h"
#include "dev.h"
#include "agn.h"

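/*
 * Map IEEE 802.11 TIDs (user priorities 0-7) to mac80211 access
 * categories, per the standard UP-to-AC mapping: TIDs 0 and 3 are
 * Best Effort, 1 and 2 Background, 4 and 5 Video, 6 and 7 Voice.
 */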
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};

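/*
 * Ask the firmware to protect this frame (with RTS/CTS or CTS-to-self)
 * whenever rate control requested protection or the frame may go out
 * as part of an A-MPDU.
 */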
static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}

/*
 * Build the basic part of the REPLY_TX command.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == NL80211_BAND_2GHZ &&
		 priv->lib->bt_params &&
		 priv->lib->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		 ieee80211_is_reassoc_req(fc) ||
		 info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

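/*
 * Fill in the rate and antenna fields of the Tx command. Data frames
 * defer to the uCode station table (rate scaling); management and
 * control frames get an explicit legacy rate chosen here.
 */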
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     struct ieee80211_sta *sta,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate.  Thus, we use the lowest supported rate for
	 * this band.  Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(
				&priv->nvm_data->bands[info->band], sta);
	/* For the 5 GHz band, remap mac80211 rate indices into driver indices */
	if (info->band == NL80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operate as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->nvm_data->valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
					priv, priv->mgmt_tx_ant,
					priv->nvm_data->valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

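/*
 * Program the Tx command for hardware encryption: the key material
 * (or, for TKIP, the per-packet phase-2 key derived by mac80211) is
 * copied into the command so the uCode can encrypt the frame itself.
 */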
static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

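		/*
		 * The WEP key is copied in at offset 3 of the key array;
		 * the first bytes appear to be reserved by the uCode's
		 * command layout (this offset is a long-standing quirk of
		 * the REPLY_TX format rather than anything documented here).
		 */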
		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}

/**
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
				   struct ieee80211_sta *sta)
{
	int sta_id;

	if (!sta)
		return context->bcast_sta_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IWL_INVALID_STATION);

	return sta_id;
}

/*
 * Start the REPLY_TX command process.
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
		  struct ieee80211_sta *sta,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_tx_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false, is_data_qos = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

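	/*
	 * P2P: if a Notice of Absence attribute is pending, append it to
	 * probe responses before they go out.
	 */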
	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			skb_put_data(skb, noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use the broadcast id so as not to break aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	dev_cmd->hdr.cmd = REPLY_TX;
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

	memset(&info->status, 0, sizeof(info->status));
	memset(info->driver_data, 0, sizeof(info->driver_data));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;
	/* From now on, we cannot access info->control */

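	/*
	 * From here on the per-TID state (sequence numbers, aggregation
	 * state) is read and updated, so hold the station lock.
	 */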
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv,
				"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
				sta_id, tid,
				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d\n", tid_data->agg.state))
			goto drop_unlock_sta;

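		/*
		 * Drive the 802.11 sequence number from the driver's per-TID
		 * counter. The sequence number occupies the upper 12 bits of
		 * seq_ctrl, so masking with IEEE80211_SCTL_SEQ and stepping
		 * by 0x10 advances it by one while leaving the 4 fragment
		 * bits untouched.
		 */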
		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			is_agg = true;
		is_data_qos = true;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	txq_id = info->hw_queue;

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	}

	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
		     txq_id, seq_number);

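	/*
	 * Hand the frame to the transport layer; on success it owns the
	 * skb and dev_cmd, on failure we free dev_cmd ourselves below.
	 */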
	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (is_data_qos && !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}

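/*
 * Reserve a free hardware aggregation queue and bind it to the given
 * mac80211 queue; returns -ENOSPC when all AMPDU queues are in use.
 */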
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
{
	int q;

	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
	     q < priv->trans->trans_cfg->base_params->num_of_queues; q++) {
		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
			priv->queue_to_mac80211[q] = mq;
			return q;
		}
	}

	return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}

int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive the ADDBA response.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv,
			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			 sta_id, tid, tid_data->agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv,
				    "Can't proceed: ssn %d, next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	agg_state = tid_data->agg.state;
	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop. This can happen
		 * when we don't get the ADDBA response on time, or we
		 * didn't have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

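/*
 * Kick off an aggregation session: reserve a hardware queue, enable the
 * TID, and tell mac80211 whether the ADDBA exchange can proceed
 * immediately or must wait until the shared AC queue has drained.
 */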
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id, ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
	if (txq_id < 0) {
		IWL_DEBUG_TX_QUEUES(priv,
			"No free aggregation queue for %pM/%d\n",
			sta->addr, tid);
		return txq_id;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_bh(&priv->sta_lock);
	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;

	*ssn = tid_data->agg.ssn;

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_bh(&priv->sta_lock);

	return ret;
}

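/*
 * Tear down an aggregation session without waiting for the queue to
 * drain: mark it off, flush the Tx FIFO, and release the hardware queue.
 */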
int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	enum iwl_agg_state agg_state;
	int sta_id, txq_id;
	sta_id = iwl_sta_id(sta);

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
	 */
	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;
	agg_state = tid_data->agg.state;
	IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
			    sta_id, tid, txq_id, tid_data->agg.state);

	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
		IWL_ERR(priv, "Couldn't flush the AGG queue\n");

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop. This can happen
		 * when we don't get the ADDBA response on time, or we
		 * didn't have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id, true);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);
		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	return 0;
}

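/*
 * The ADDBA handshake completed: program the hardware queue with the
 * negotiated parameters and update rate scaling with the aggregation
 * frame limit.
 */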
int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn, 0);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * Switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic.
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		 sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
}

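/*
 * Called with the station lock held once next_reclaimed catches up with
 * the aggregation SSN; completes a pending ADDBA or DELBA flow whose
 * hardware queue has now drained.
 */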
static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			iwl_trans_txq_disable(priv->trans,
					      tid_data->agg.txq_id, true);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl = %d\n",
				tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}

static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}

/*
 * Translate a uCode response into mac80211 tx status control values.
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

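/*
 * Debug helper: the TX_STATUS_FAIL/TX_STATUS_POSTPONE macros below
 * expand each status code into a "case ...: return ..." arm via the
 * preprocessor's stringification operator, keeping the switch in sync
 * with the constant names.
 */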
#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}

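/*
 * The scheduler SSN trails the per-frame status array in the Tx
 * response: skip frame_count status words and mask the result down to
 * a 12-bit 802.11 sequence number.
 */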
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}

static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
			"got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    priv->lib->bt_params &&
	    priv->lib->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
			   agg->txq_id,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);
		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
			IWL_DEBUG_TX_REPLY(priv,
					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
					   i,
					   iwl_get_agg_tx_fail_reason(fstatus),
					   fstatus & AGG_TX_STATUS_MSK,
					   retry_cnt);
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}

static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}

static void iwl_check_abort_status(struct iwl_priv *priv,
				   u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

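/*
 * Handle a REPLY_TX notification from the uCode: update aggregation
 * bookkeeping, reclaim transmitted frames from the queue, and report
 * their status back to mac80211.
 */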
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_bh(&priv->sta_lock);

	if (is_agg) {
		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
			     tid >= IWL_MAX_TID_COUNT);
		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
				priv->tid_data[sta_id][tid].agg.txq_id);
		iwl_rx_reply_tx_agg(priv, tx_resp);
	}

	__skb_queue_head_init(&skbs);

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow us to reclaim the hole and all
			 * the subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
						  next_reclaimed);
			iwlagn_check_ratid_empty(priv, sta_id, tid);
		}

		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs, false);

		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv,
					"stop all queues: passive channel\n");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
				     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		if (!is_agg && freed != 1)
			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
				   iwl_get_tx_fail_reason(status), status);

		IWL_DEBUG_TX_REPLY(priv,
				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame,
				   SEQ_TO_INDEX(sequence), ssn,
				   le16_to_cpu(tx_resp->seq_ctl));
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status(priv->hw, skb);
	}
}

/*
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles the block-acknowledge notification from the device, which reports
 * the success of frames sent via aggregation.
 */
void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->trans->trans_cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_bh(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_bh(&priv->sta_lock);
		return;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can possibly happen very often, and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN.
		 */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
				    scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock_bh(&priv->sta_lock);
		return;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs, false);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
			"bogus sent(%d) and ack(%d) count\n",
			ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * Clamp txed down to txed_2_done so the bogus count
		 * won't impact rate scaling.
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);
	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		struct ieee80211_hdr *hdr = (void *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		memset(&info->status, 0, sizeof(info->status));
		/* Packet was transmitted successfully; failures come as
		 * single frames because, before failing a frame, the firmware
		 * transmits it without aggregation at least once.
		 */
		info->flags |= IEEE80211_TX_STAT_ACK;

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status(priv->hw, skb);
	}
}
1391