1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of version 2 of the GNU General Public License as
13  * published by the Free Software Foundation.
14  *
15  * This program is distributed in the hope that it will be useful, but
16  * WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public License
21  * along with this program; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
23  * USA
24  *
25  * The full GNU General Public License is included in this distribution
26  * in the file called COPYING.
27  *
28  * Contact Information:
29  *  Intel Linux Wireless <ilw@linux.intel.com>
30  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31  *
32  * BSD LICENSE
33  *
34  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
35  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  *
42  *  * Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  *  * Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in
46  *    the documentation and/or other materials provided with the
47  *    distribution.
48  *  * Neither the name Intel Corporation nor the names of its
49  *    contributors may be used to endorse or promote products derived
50  *    from this software without specific prior written permission.
51  *
52  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
53  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
54  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
55  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
56  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
57  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
58  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
59  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
60  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
61  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
62  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63  *
64  *****************************************************************************/
65 #include <linux/kernel.h>
66 #include <linux/slab.h>
67 #include <linux/skbuff.h>
68 #include <linux/netdevice.h>
69 #include <linux/etherdevice.h>
70 #include <linux/ip.h>
71 #include <linux/if_arp.h>
72 #include <linux/devcoredump.h>
73 #include <net/mac80211.h>
74 #include <net/ieee80211_radiotap.h>
75 #include <net/tcp.h>
76 
77 #include "iwl-op-mode.h"
78 #include "iwl-io.h"
79 #include "mvm.h"
80 #include "sta.h"
81 #include "time-event.h"
82 #include "iwl-eeprom-parse.h"
83 #include "iwl-phy-db.h"
84 #include "testmode.h"
85 #include "iwl-fw-error-dump.h"
86 #include "iwl-prph.h"
87 #include "iwl-csr.h"
88 #include "iwl-nvm-parse.h"
89 
90 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
91 	{
92 		.max = 1,
93 		.types = BIT(NL80211_IFTYPE_STATION),
94 	},
95 	{
96 		.max = 1,
97 		.types = BIT(NL80211_IFTYPE_AP) |
98 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
99 			BIT(NL80211_IFTYPE_P2P_GO),
100 	},
101 	{
102 		.max = 1,
103 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
104 	},
105 };
106 
107 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
108 	{
109 		.num_different_channels = 2,
110 		.max_interfaces = 3,
111 		.limits = iwl_mvm_limits,
112 		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
113 	},
114 };
115 
116 #ifdef CONFIG_PM_SLEEP
117 static const struct nl80211_wowlan_tcp_data_token_feature
118 iwl_mvm_wowlan_tcp_token_feature = {
119 	.min_len = 0,
120 	.max_len = 255,
121 	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
122 };
123 
124 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
125 	.tok = &iwl_mvm_wowlan_tcp_token_feature,
126 	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
127 			    sizeof(struct ethhdr) -
128 			    sizeof(struct iphdr) -
129 			    sizeof(struct tcphdr),
130 	.data_interval_max = 65535, /* __le16 in API */
131 	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
132 			    sizeof(struct ethhdr) -
133 			    sizeof(struct iphdr) -
134 			    sizeof(struct tcphdr),
135 	.seq = true,
136 };
137 #endif
138 
139 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
140 /*
141  * Use the reserved field to indicate magic values.
142  * These values are only used internally by the driver
143  * and won't make it to the fw (reserved will be 0).
144  * BC_FILTER_MAGIC_IP - configure the val of this attribute to
145  *	be the vif's IP address. In case there is not exactly one
146  *	IP address (0, or more than 1), this attribute will
147  *	be skipped.
148  * BC_FILTER_MAGIC_MAC - set the val of this attribute to
149  *	the LSB bytes of the vif's MAC address.
150  */
151 enum {
152 	BC_FILTER_MAGIC_NONE = 0,
153 	BC_FILTER_MAGIC_IP,
154 	BC_FILTER_MAGIC_MAC,
155 };
156 
157 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
158 	{
159 		/* arp */
160 		.discard = 0,
161 		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
162 		.attrs = {
163 			{
164 				/* frame type - arp, hw type - ethernet */
165 				.offset_type =
166 					BCAST_FILTER_OFFSET_PAYLOAD_START,
167 				.offset = sizeof(rfc1042_header),
168 				.val = cpu_to_be32(0x08060001),
169 				.mask = cpu_to_be32(0xffffffff),
170 			},
171 			{
172 				/* arp dest ip */
173 				.offset_type =
174 					BCAST_FILTER_OFFSET_PAYLOAD_START,
175 				.offset = sizeof(rfc1042_header) + 2 +
176 					  sizeof(struct arphdr) +
177 					  ETH_ALEN + sizeof(__be32) +
178 					  ETH_ALEN,
179 				.mask = cpu_to_be32(0xffffffff),
180 				/* mark it as special field */
181 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
182 			},
183 		},
184 	},
185 	{
186 		/* dhcp offer bcast */
187 		.discard = 0,
188 		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
189 		.attrs = {
190 			{
191 				/* udp dest port - 68 (bootp client)*/
192 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
193 				.offset = offsetof(struct udphdr, dest),
194 				.val = cpu_to_be32(0x00440000),
195 				.mask = cpu_to_be32(0xffff0000),
196 			},
197 			{
198 				/* dhcp - lsb bytes of client hw address */
199 				.offset_type = BCAST_FILTER_OFFSET_IP_END,
200 				.offset = 38,
201 				.mask = cpu_to_be32(0xffffffff),
202 				/* mark it as special field */
203 				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
204 			},
205 		},
206 	},
207 	/* last filter must be empty */
208 	{},
209 };
210 #endif
211 
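/*
 * d0i3 reference accounting: iwl_mvm_ref()/iwl_mvm_unref() below bump a
 * per-reason counter under refs_lock and forward the reference to the
 * transport layer, which keeps the device out of the D0i3 low-power state
 * while any reference is held. Both are no-ops when D0i3 isn't supported.
 */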
212 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
213 {
214 	if (!iwl_mvm_is_d0i3_supported(mvm))
215 		return;
216 
217 	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
218 	spin_lock_bh(&mvm->refs_lock);
219 	mvm->refs[ref_type]++;
220 	spin_unlock_bh(&mvm->refs_lock);
221 	iwl_trans_ref(mvm->trans);
222 }
223 
224 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
225 {
226 	if (!iwl_mvm_is_d0i3_supported(mvm))
227 		return;
228 
229 	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
230 	spin_lock_bh(&mvm->refs_lock);
231 	WARN_ON(!mvm->refs[ref_type]--);
232 	spin_unlock_bh(&mvm->refs_lock);
233 	iwl_trans_unref(mvm->trans);
234 }
235 
236 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
237 				     enum iwl_mvm_ref_type except_ref)
238 {
239 	int i, j;
240 
241 	if (!iwl_mvm_is_d0i3_supported(mvm))
242 		return;
243 
244 	spin_lock_bh(&mvm->refs_lock);
245 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
246 		if (except_ref == i || !mvm->refs[i])
247 			continue;
248 
249 		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
250 			      i, mvm->refs[i]);
251 		for (j = 0; j < mvm->refs[i]; j++)
252 			iwl_trans_unref(mvm->trans);
253 		mvm->refs[i] = 0;
254 	}
255 	spin_unlock_bh(&mvm->refs_lock);
256 }
257 
258 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
259 {
260 	int i;
261 	bool taken = false;
262 
263 	if (!iwl_mvm_is_d0i3_supported(mvm))
264 		return true;
265 
266 	spin_lock_bh(&mvm->refs_lock);
267 	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
268 		if (mvm->refs[i]) {
269 			taken = true;
270 			break;
271 		}
272 	}
273 	spin_unlock_bh(&mvm->refs_lock);
274 
275 	return taken;
276 }
277 
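/*
 * Take a reference and wait (up to one second) for any pending D0i3 exit to
 * complete; drops the reference and returns -EIO if the exit times out.
 */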
278 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
279 {
280 	iwl_mvm_ref(mvm, ref_type);
281 
282 	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
283 				!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
284 				HZ)) {
285 		WARN_ON_ONCE(1);
286 		iwl_mvm_unref(mvm, ref_type);
287 		return -EIO;
288 	}
289 
290 	return 0;
291 }
292 
293 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
294 {
295 	int i;
296 
297 	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
298 	for (i = 0; i < NUM_PHY_CTX; i++) {
299 		mvm->phy_ctxts[i].id = i;
300 		mvm->phy_ctxts[i].ref = 0;
301 	}
302 }
303 
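/*
 * Ask the firmware for the MCC/regulatory data matching alpha2 and build an
 * ieee80211_regdomain from the returned channel list; *changed (if non-NULL)
 * reports whether the firmware indicated a new channel profile.
 */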
304 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
305 						  const char *alpha2,
306 						  enum iwl_mcc_source src_id,
307 						  bool *changed)
308 {
309 	struct ieee80211_regdomain *regd = NULL;
310 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
311 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
312 	struct iwl_mcc_update_resp *resp;
313 
314 	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
315 
316 	lockdep_assert_held(&mvm->mutex);
317 
318 	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
319 	if (IS_ERR_OR_NULL(resp)) {
320 		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
321 			      PTR_ERR_OR_ZERO(resp));
322 		goto out;
323 	}
324 
325 	if (changed) {
326 		u32 status = le32_to_cpu(resp->status);
327 
328 		*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
329 			    status == MCC_RESP_ILLEGAL);
330 	}
331 
332 	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
333 				      __le32_to_cpu(resp->n_channels),
334 				      resp->channels,
335 				      __le16_to_cpu(resp->mcc));
336 	/* Store the return source id */
337 	src_id = resp->source_id;
338 	kfree(resp);
339 	if (IS_ERR_OR_NULL(regd)) {
340 		IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
341 			      PTR_ERR_OR_ZERO(regd));
342 		goto out;
343 	}
344 
345 	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
346 		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
347 	mvm->lar_regdom_set = true;
348 	mvm->mcc_src = src_id;
349 
350 out:
351 	return regd;
352 }
353 
354 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
355 {
356 	bool changed;
357 	struct ieee80211_regdomain *regd;
358 
359 	if (!iwl_mvm_is_lar_supported(mvm))
360 		return;
361 
362 	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
363 	if (!IS_ERR_OR_NULL(regd)) {
364 		/* only update the regulatory core if changed */
365 		if (changed)
366 			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
367 
368 		kfree(regd);
369 	}
370 }
371 
372 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
373 							  bool *changed)
374 {
375 	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
376 				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
377 				     MCC_SOURCE_GET_CURRENT :
378 				     MCC_SOURCE_OLD_FW, changed);
379 }
380 
381 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
382 {
383 	enum iwl_mcc_source used_src;
384 	struct ieee80211_regdomain *regd;
385 	int ret;
386 	bool changed;
387 	const struct ieee80211_regdomain *r =
388 			rtnl_dereference(mvm->hw->wiphy->regd);
389 
390 	if (!r)
391 		return -ENOENT;
392 
393 	/* save the last source in case we overwrite it below */
394 	used_src = mvm->mcc_src;
395 	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
396 		/* Notify the firmware we support wifi location updates */
397 		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
398 		if (!IS_ERR_OR_NULL(regd))
399 			kfree(regd);
400 	}
401 
402 	/* Now set our last stored MCC and source */
403 	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
404 				     &changed);
405 	if (IS_ERR_OR_NULL(regd))
406 		return -EIO;
407 
408 	/* update cfg80211 if the regdomain was changed */
409 	if (changed)
410 		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
411 	else
412 		ret = 0;
413 
414 	kfree(regd);
415 	return ret;
416 }
417 
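/*
 * Advertise the driver's capabilities (hw flags, ciphers, interface
 * combinations, scan limits, WoWLAN support, etc.) to mac80211 and then
 * register the hardware with it.
 */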
418 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
419 {
420 	struct ieee80211_hw *hw = mvm->hw;
421 	int num_mac, ret, i;
422 	static const u32 mvm_ciphers[] = {
423 		WLAN_CIPHER_SUITE_WEP40,
424 		WLAN_CIPHER_SUITE_WEP104,
425 		WLAN_CIPHER_SUITE_TKIP,
426 		WLAN_CIPHER_SUITE_CCMP,
427 	};
428 
429 	/* Tell mac80211 our characteristics */
430 	ieee80211_hw_set(hw, SIGNAL_DBM);
431 	ieee80211_hw_set(hw, SPECTRUM_MGMT);
432 	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
433 	ieee80211_hw_set(hw, QUEUE_CONTROL);
434 	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
435 	ieee80211_hw_set(hw, SUPPORTS_PS);
436 	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
437 	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
438 	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
439 	ieee80211_hw_set(hw, CONNECTION_MONITOR);
440 	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
441 	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
442 	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
443 
444 	hw->queues = mvm->first_agg_queue;
445 	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
446 	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
447 				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
448 	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
449 		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
450 	hw->rate_control_algorithm = "iwl-mvm-rs";
451 	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
452 	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
453 
454 	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 2);
455 	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
456 	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
457 	hw->wiphy->cipher_suites = mvm->ciphers;
458 
459 	/*
460 	 * Enable 11w if advertised by firmware and software crypto
461 	 * is not enabled (as the firmware will interpret some mgmt
462 	 * packets, so enabling it with software crypto isn't safe)
463 	 */
464 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
465 	    !iwlwifi_mod_params.sw_crypto) {
466 		ieee80211_hw_set(hw, MFP_CAPABLE);
467 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
468 			WLAN_CIPHER_SUITE_AES_CMAC;
469 		hw->wiphy->n_cipher_suites++;
470 	}
471 
472 	/* currently FW API supports only one optional cipher scheme */
473 	if (mvm->fw->cs[0].cipher) {
474 		mvm->hw->n_cipher_schemes = 1;
475 		mvm->hw->cipher_schemes = &mvm->fw->cs[0];
476 		mvm->ciphers[hw->wiphy->n_cipher_suites] =
477 			mvm->fw->cs[0].cipher;
478 		hw->wiphy->n_cipher_suites++;
479 	}
480 
481 	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
482 	hw->wiphy->features |=
483 		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
484 		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
485 		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
486 
487 	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
488 	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
489 	hw->chanctx_data_size = sizeof(u16);
490 
491 	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
492 		BIT(NL80211_IFTYPE_P2P_CLIENT) |
493 		BIT(NL80211_IFTYPE_AP) |
494 		BIT(NL80211_IFTYPE_P2P_GO) |
495 		BIT(NL80211_IFTYPE_P2P_DEVICE) |
496 		BIT(NL80211_IFTYPE_ADHOC);
497 
498 	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
499 	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
500 	if (iwl_mvm_is_lar_supported(mvm))
501 		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
502 	else
503 		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
504 					       REGULATORY_DISABLE_BEACON_HINTS;
505 
506 	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
507 		hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
508 
509 	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
510 
511 	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
512 	hw->wiphy->n_iface_combinations =
513 		ARRAY_SIZE(iwl_mvm_iface_combinations);
514 
515 	hw->wiphy->max_remain_on_channel_duration = 10000;
516 	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
517 	/* we can compensate an offset of up to 3 channels = 15 MHz */
518 	hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
519 
520 	/* Extract MAC address */
521 	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
522 	hw->wiphy->addresses = mvm->addresses;
523 	hw->wiphy->n_addresses = 1;
524 
525 	/* Extract additional MAC addresses if available */
526 	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
527 		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
528 
529 	for (i = 1; i < num_mac; i++) {
530 		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
531 		       ETH_ALEN);
532 		mvm->addresses[i].addr[5]++;
533 		hw->wiphy->n_addresses++;
534 	}
535 
536 	iwl_mvm_reset_phy_ctxts(mvm);
537 
538 	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
539 
540 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
541 
542 	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
543 	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
544 		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
545 
546 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
547 		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
548 	else
549 		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
550 
551 	if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
552 		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
553 			&mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
554 	if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
555 		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
556 			&mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
557 
558 		if (fw_has_capa(&mvm->fw->ucode_capa,
559 				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
560 		    fw_has_api(&mvm->fw->ucode_capa,
561 			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
562 			hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
563 				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
564 	}
565 
566 	hw->wiphy->hw_version = mvm->trans->hw_id;
567 
568 	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
569 		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
570 	else
571 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
572 
573 	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
574 	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
575 	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
576 	/* we create the 802.11 header and zero length SSID IE. */
577 	hw->wiphy->max_sched_scan_ie_len =
578 		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
579 	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
580 	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
581 
582 	/*
583 	 * the firmware uses u8 for num of iterations, but 0xff is saved for
584 	 * infinite loop, so the maximum number of iterations is actually 254.
585 	 */
586 	hw->wiphy->max_sched_scan_plan_iterations = 254;
587 
588 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
589 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
590 			       NL80211_FEATURE_P2P_GO_OPPPS |
591 			       NL80211_FEATURE_DYNAMIC_SMPS |
592 			       NL80211_FEATURE_STATIC_SMPS |
593 			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
594 
595 	if (fw_has_capa(&mvm->fw->ucode_capa,
596 			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
597 		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
598 	if (fw_has_capa(&mvm->fw->ucode_capa,
599 			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
600 		hw->wiphy->features |= NL80211_FEATURE_QUIET;
601 
602 	if (fw_has_capa(&mvm->fw->ucode_capa,
603 			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
604 		hw->wiphy->features |=
605 			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
606 
607 	if (fw_has_capa(&mvm->fw->ucode_capa,
608 			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
609 		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
610 
611 	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
612 
613 #ifdef CONFIG_PM_SLEEP
614 	if (iwl_mvm_is_d0i3_supported(mvm) &&
615 	    device_can_wakeup(mvm->trans->dev)) {
616 		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
617 		hw->wiphy->wowlan = &mvm->wowlan;
618 	}
619 
620 	if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
621 	    mvm->trans->ops->d3_suspend &&
622 	    mvm->trans->ops->d3_resume &&
623 	    device_can_wakeup(mvm->trans->dev)) {
624 		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
625 				     WIPHY_WOWLAN_DISCONNECT |
626 				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
627 				     WIPHY_WOWLAN_RFKILL_RELEASE |
628 				     WIPHY_WOWLAN_NET_DETECT;
629 		if (!iwlwifi_mod_params.sw_crypto)
630 			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
631 					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
632 					     WIPHY_WOWLAN_4WAY_HANDSHAKE;
633 
634 		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
635 		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
636 		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
637 		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
638 		mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
639 		hw->wiphy->wowlan = &mvm->wowlan;
640 	}
641 #endif
642 
643 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
644 	/* assign default bcast filtering configuration */
645 	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
646 #endif
647 
648 	ret = iwl_mvm_leds_init(mvm);
649 	if (ret)
650 		return ret;
651 
652 	if (fw_has_capa(&mvm->fw->ucode_capa,
653 			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
654 		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
655 		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
656 		ieee80211_hw_set(hw, TDLS_WIDER_BW);
657 	}
658 
659 	if (fw_has_capa(&mvm->fw->ucode_capa,
660 			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
661 		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
662 		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
663 	}
664 
665 	hw->netdev_features |= mvm->cfg->features;
666 	if (!iwl_mvm_is_csum_supported(mvm))
667 		hw->netdev_features &= ~NETIF_F_RXCSUM;
668 
669 	ret = ieee80211_register_hw(mvm->hw);
670 	if (ret)
671 		iwl_mvm_leds_exit(mvm);
672 
673 	return ret;
674 }
675 
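/*
 * While the device is in D0i3, frames destined to the AP station are queued
 * on d0i3_tx and the mac80211 queues are stopped; a momentary reference
 * (ref + unref) triggers the wakeup, and the queue is flushed on D0i3 exit.
 */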
676 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
677 			     struct ieee80211_sta *sta,
678 			     struct sk_buff *skb)
679 {
680 	struct iwl_mvm_sta *mvmsta;
681 	bool defer = false;
682 
683 	/*
684 	 * double check the IN_D0I3 flag both before and after
685 	 * taking the spinlock, in order to prevent taking
686 	 * the spinlock when not needed.
687 	 */
688 	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
689 		return false;
690 
691 	spin_lock(&mvm->d0i3_tx_lock);
692 	/*
693 	 * testing the flag again ensures the skb dequeue
694 	 * loop (on d0i3 exit) hasn't run yet.
695 	 */
696 	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
697 		goto out;
698 
699 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
700 	if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
701 	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
702 		goto out;
703 
704 	__skb_queue_tail(&mvm->d0i3_tx, skb);
705 	ieee80211_stop_queues(mvm->hw);
706 
707 	/* trigger wakeup */
708 	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
709 	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
710 
711 	defer = true;
712 out:
713 	spin_unlock(&mvm->d0i3_tx_lock);
714 	return defer;
715 }
716 
717 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
718 			   struct ieee80211_tx_control *control,
719 			   struct sk_buff *skb)
720 {
721 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
722 	struct ieee80211_sta *sta = control->sta;
723 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
724 	struct ieee80211_hdr *hdr = (void *)skb->data;
725 
726 	if (iwl_mvm_is_radio_killed(mvm)) {
727 		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
728 		goto drop;
729 	}
730 
731 	if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
732 	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
733 	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
734 		goto drop;
735 
736 	/* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
737 	if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
738 		     ieee80211_is_mgmt(hdr->frame_control) &&
739 		     !ieee80211_is_deauth(hdr->frame_control) &&
740 		     !ieee80211_is_disassoc(hdr->frame_control) &&
741 		     !ieee80211_is_action(hdr->frame_control)))
742 		sta = NULL;
743 
744 	/* If there is no sta, and it's not offchannel - send through AP */
745 	if (info->control.vif->type == NL80211_IFTYPE_STATION &&
746 	    info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
747 		struct iwl_mvm_vif *mvmvif =
748 			iwl_mvm_vif_from_mac80211(info->control.vif);
749 		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
750 
751 		if (ap_sta_id < IWL_MVM_STATION_COUNT) {
752 			/* mac80211 holds rcu read lock */
753 			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
754 			if (IS_ERR_OR_NULL(sta))
755 				goto drop;
756 		}
757 	}
758 
759 	if (sta) {
760 		if (iwl_mvm_defer_tx(mvm, sta, skb))
761 			return;
762 		if (iwl_mvm_tx_skb(mvm, skb, sta))
763 			goto drop;
764 		return;
765 	}
766 
767 	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
768 		goto drop;
769 	return;
770  drop:
771 	ieee80211_free_txskb(hw, skb);
772 }
773 
774 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
775 {
776 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
777 		return false;
778 	return true;
779 }
780 
781 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
782 {
783 	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
784 		return false;
785 	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
786 		return true;
787 
788 	/* enabled by default */
789 	return true;
790 }
791 
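/*
 * CHECK_BA_TRIGGER() fires a firmware debug data collection when the given
 * TID is set in the trigger's TID bitmap, e.g.:
 *
 *	CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
 *			 "RX AGG START: MAC %pM tid %d ssn %d\n",
 *			 sta->addr, tid, rx_ba_ssn);
 */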
792 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)	\
793 	do {							\
794 		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))	\
795 			break;					\
796 		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);	\
797 	} while (0)
798 
799 static void
800 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
801 			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
802 			    enum ieee80211_ampdu_mlme_action action)
803 {
804 	struct iwl_fw_dbg_trigger_tlv *trig;
805 	struct iwl_fw_dbg_trigger_ba *ba_trig;
806 
807 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
808 		return;
809 
810 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
811 	ba_trig = (void *)trig->data;
812 
813 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
814 		return;
815 
816 	switch (action) {
817 	case IEEE80211_AMPDU_TX_OPERATIONAL: {
818 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
819 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
820 
821 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
822 				 "TX AGG START: MAC %pM tid %d ssn %d\n",
823 				 sta->addr, tid, tid_data->ssn);
824 		break;
825 		}
826 	case IEEE80211_AMPDU_TX_STOP_CONT:
827 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
828 				 "TX AGG STOP: MAC %pM tid %d\n",
829 				 sta->addr, tid);
830 		break;
831 	case IEEE80211_AMPDU_RX_START:
832 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
833 				 "RX AGG START: MAC %pM tid %d ssn %d\n",
834 				 sta->addr, tid, rx_ba_ssn);
835 		break;
836 	case IEEE80211_AMPDU_RX_STOP:
837 		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
838 				 "RX AGG STOP: MAC %pM tid %d\n",
839 				 sta->addr, tid);
840 		break;
841 	default:
842 		break;
843 	}
844 }
845 
846 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
847 				    struct ieee80211_vif *vif,
848 				    struct ieee80211_ampdu_params *params)
849 {
850 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
851 	int ret;
852 	bool tx_agg_ref = false;
853 	struct ieee80211_sta *sta = params->sta;
854 	enum ieee80211_ampdu_mlme_action action = params->action;
855 	u16 tid = params->tid;
856 	u16 *ssn = &params->ssn;
857 	u8 buf_size = params->buf_size;
858 
859 	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
860 		     sta->addr, tid, action);
861 
862 	if (!(mvm->nvm_data->sku_cap_11n_enable))
863 		return -EACCES;
864 
865 	/* return from D0i3 before starting a new Tx aggregation */
866 	switch (action) {
867 	case IEEE80211_AMPDU_TX_START:
868 	case IEEE80211_AMPDU_TX_STOP_CONT:
869 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
870 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
871 	case IEEE80211_AMPDU_TX_OPERATIONAL:
872 		/*
873 		 * for tx start, wait synchronously until D0i3 exit to
874 		 * get the correct sequence number for the tid.
875 		 * additionally, some other ampdu actions use direct
876 		 * target access, which is not handled automatically
877 		 * by the trans layer (unlike commands), so wait for
878 		 * d0i3 exit in these cases as well.
879 		 */
880 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
881 		if (ret)
882 			return ret;
883 
884 		tx_agg_ref = true;
885 		break;
886 	default:
887 		break;
888 	}
889 
890 	mutex_lock(&mvm->mutex);
891 
892 	switch (action) {
893 	case IEEE80211_AMPDU_RX_START:
894 		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
895 			ret = -EINVAL;
896 			break;
897 		}
898 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
899 		break;
900 	case IEEE80211_AMPDU_RX_STOP:
901 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
902 		break;
903 	case IEEE80211_AMPDU_TX_START:
904 		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
905 			ret = -EINVAL;
906 			break;
907 		}
908 		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
909 		break;
910 	case IEEE80211_AMPDU_TX_STOP_CONT:
911 		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
912 		break;
913 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
914 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
915 		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
916 		break;
917 	case IEEE80211_AMPDU_TX_OPERATIONAL:
918 		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
919 		break;
920 	default:
921 		WARN_ON_ONCE(1);
922 		ret = -EINVAL;
923 		break;
924 	}
925 
926 	if (!ret) {
927 		u16 rx_ba_ssn = 0;
928 
929 		if (action == IEEE80211_AMPDU_RX_START)
930 			rx_ba_ssn = *ssn;
931 
932 		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
933 					    rx_ba_ssn, action);
934 	}
935 	mutex_unlock(&mvm->mutex);
936 
937 	/*
938 	 * If the tid is marked as started, we won't use it for offloaded
939 	 * traffic on the next D0i3 entry. It's safe to unref.
940 	 */
941 	if (tx_agg_ref)
942 		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
943 
944 	return ret;
945 }
946 
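/*
 * Per-interface cleanup run during HW restart: forget the uploaded MAC
 * context, AP station, time event data, PHY context and beacon filtering
 * state for this vif.
 */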
947 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
948 				     struct ieee80211_vif *vif)
949 {
950 	struct iwl_mvm *mvm = data;
951 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
952 
953 	mvmvif->uploaded = false;
954 	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
955 
956 	spin_lock_bh(&mvm->time_event_lock);
957 	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
958 	spin_unlock_bh(&mvm->time_event_lock);
959 
960 	mvmvif->phy_ctxt = NULL;
961 	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
962 }
963 
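/*
 * devcoredump read callback: serve bytes first from the op-mode dump buffer
 * and then from the transport dump that logically follows it.
 */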
964 static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
965 				     const void *data, size_t datalen)
966 {
967 	const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
968 	ssize_t bytes_read;
969 	ssize_t bytes_read_trans;
970 
971 	if (offset < dump_ptrs->op_mode_len) {
972 		bytes_read = min_t(ssize_t, count,
973 				   dump_ptrs->op_mode_len - offset);
974 		memcpy(buffer, (u8 *)dump_ptrs->op_mode_ptr + offset,
975 		       bytes_read);
976 		offset += bytes_read;
977 		count -= bytes_read;
978 
979 		if (count == 0)
980 			return bytes_read;
981 	} else {
982 		bytes_read = 0;
983 	}
984 
985 	if (!dump_ptrs->trans_ptr)
986 		return bytes_read;
987 
988 	offset -= dump_ptrs->op_mode_len;
989 	bytes_read_trans = min_t(ssize_t, count,
990 				 dump_ptrs->trans_ptr->len - offset);
991 	memcpy(buffer + bytes_read,
992 	       (u8 *)dump_ptrs->trans_ptr->data + offset,
993 	       bytes_read_trans);
994 
995 	return bytes_read + bytes_read_trans;
996 }
997 
998 static void iwl_mvm_free_coredump(const void *data)
999 {
1000 	const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
1001 
1002 	vfree(fw_error_dump->op_mode_ptr);
1003 	vfree(fw_error_dump->trans_ptr);
1004 	kfree(fw_error_dump);
1005 }
1006 
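/*
 * Snapshot the RX and TX FIFO state (pointers, fence registers and data)
 * into the error-dump TLV stream; NIC access is grabbed for the duration
 * of the reads.
 */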
1007 static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
1008 			       struct iwl_fw_error_dump_data **dump_data)
1009 {
1010 	struct iwl_fw_error_dump_fifo *fifo_hdr;
1011 	u32 *fifo_data;
1012 	u32 fifo_len;
1013 	unsigned long flags;
1014 	int i, j;
1015 
1016 	if (!iwl_trans_grab_nic_access(mvm->trans, false, &flags))
1017 		return;
1018 
1019 	/* Pull RXF data from all RXFs */
1020 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++) {
1021 		/*
1022 		 * Keep aside the additional offset that might be needed for
1023 		 * next RXF
1024 		 */
1025 		u32 offset_diff = RXF_DIFF_FROM_PREV * i;
1026 
1027 		fifo_hdr = (void *)(*dump_data)->data;
1028 		fifo_data = (void *)fifo_hdr->data;
1029 		fifo_len = mvm->shared_mem_cfg.rxfifo_size[i];
1030 
1031 		/* No need to try to read the data if the length is 0 */
1032 		if (fifo_len == 0)
1033 			continue;
1034 
1035 		/* Add a TLV for the RXF */
1036 		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
1037 		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1038 
1039 		fifo_hdr->fifo_num = cpu_to_le32(i);
1040 		fifo_hdr->available_bytes =
1041 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1042 							RXF_RD_D_SPACE +
1043 							offset_diff));
1044 		fifo_hdr->wr_ptr =
1045 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1046 							RXF_RD_WR_PTR +
1047 							offset_diff));
1048 		fifo_hdr->rd_ptr =
1049 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1050 							RXF_RD_RD_PTR +
1051 							offset_diff));
1052 		fifo_hdr->fence_ptr =
1053 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1054 							RXF_RD_FENCE_PTR +
1055 							offset_diff));
1056 		fifo_hdr->fence_mode =
1057 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1058 							RXF_SET_FENCE_MODE +
1059 							offset_diff));
1060 
1061 		/* Lock fence */
1062 		iwl_trans_write_prph(mvm->trans,
1063 				     RXF_SET_FENCE_MODE + offset_diff, 0x1);
1064 		/* Set fence pointer to the same place as the WR pointer */
1065 		iwl_trans_write_prph(mvm->trans,
1066 				     RXF_LD_WR2FENCE + offset_diff, 0x1);
1067 		/* Set fence offset */
1068 		iwl_trans_write_prph(mvm->trans,
1069 				     RXF_LD_FENCE_OFFSET_ADDR + offset_diff,
1070 				     0x0);
1071 
1072 		/* Read FIFO */
1073 		fifo_len /= sizeof(u32); /* Size in DWORDS */
1074 		for (j = 0; j < fifo_len; j++)
1075 			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1076 							 RXF_FIFO_RD_FENCE_INC +
1077 							 offset_diff);
1078 		*dump_data = iwl_fw_error_next_data(*dump_data);
1079 	}
1080 
1081 	/* Pull TXF data from all TXFs */
1082 	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++) {
1083 		/* Mark the number of TXF we're pulling now */
1084 		iwl_trans_write_prph(mvm->trans, TXF_LARC_NUM, i);
1085 
1086 		fifo_hdr = (void *)(*dump_data)->data;
1087 		fifo_data = (void *)fifo_hdr->data;
1088 		fifo_len = mvm->shared_mem_cfg.txfifo_size[i];
1089 
1090 		/* No need to try to read the data if the length is 0 */
1091 		if (fifo_len == 0)
1092 			continue;
1093 
1094 		/* Add a TLV for the FIFO */
1095 		(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
1096 		(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
1097 
1098 		fifo_hdr->fifo_num = cpu_to_le32(i);
1099 		fifo_hdr->available_bytes =
1100 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1101 							TXF_FIFO_ITEM_CNT));
1102 		fifo_hdr->wr_ptr =
1103 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1104 							TXF_WR_PTR));
1105 		fifo_hdr->rd_ptr =
1106 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1107 							TXF_RD_PTR));
1108 		fifo_hdr->fence_ptr =
1109 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1110 							TXF_FENCE_PTR));
1111 		fifo_hdr->fence_mode =
1112 			cpu_to_le32(iwl_trans_read_prph(mvm->trans,
1113 							TXF_LOCK_FENCE));
1114 
1115 		/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1116 		iwl_trans_write_prph(mvm->trans, TXF_READ_MODIFY_ADDR,
1117 				     TXF_WR_PTR);
1118 
1119 		/* Dummy-read to advance the read pointer to the head */
1120 		iwl_trans_read_prph(mvm->trans, TXF_READ_MODIFY_DATA);
1121 
1122 		/* Read FIFO */
1123 		fifo_len /= sizeof(u32); /* Size in DWORDS */
1124 		for (j = 0; j < fifo_len; j++)
1125 			fifo_data[j] = iwl_trans_read_prph(mvm->trans,
1126 							  TXF_READ_MODIFY_DATA);
1127 		*dump_data = iwl_fw_error_next_data(*dump_data);
1128 	}
1129 
1130 	iwl_trans_release_nic_access(mvm->trans, &flags);
1131 }
1132 
1133 void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
1134 {
1135 	if (mvm->fw_dump_desc == &iwl_mvm_dump_desc_assert ||
1136 	    !mvm->fw_dump_desc)
1137 		return;
1138 
1139 	kfree(mvm->fw_dump_desc);
1140 	mvm->fw_dump_desc = NULL;
1141 }
1142 
1143 #define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
1144 #define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
1145 
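/*
 * Build the firmware error dump: device info, FIFOs (only when the firmware
 * is in error state), SRAM/SMEM/ICCM memory regions and paging blocks, then
 * hand the result to devcoredump together with the transport's own dump.
 */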
1146 void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
1147 {
1148 	struct iwl_fw_error_dump_file *dump_file;
1149 	struct iwl_fw_error_dump_data *dump_data;
1150 	struct iwl_fw_error_dump_info *dump_info;
1151 	struct iwl_fw_error_dump_mem *dump_mem;
1152 	struct iwl_fw_error_dump_trigger_desc *dump_trig;
1153 	struct iwl_mvm_dump_ptrs *fw_error_dump;
1154 	u32 sram_len, sram_ofs;
1155 	u32 file_len, fifo_data_len = 0;
1156 	u32 smem_len = mvm->cfg->smem_len;
1157 	u32 sram2_len = mvm->cfg->dccm2_len;
1158 	bool monitor_dump_only = false;
1159 
1160 	lockdep_assert_held(&mvm->mutex);
1161 
1162 	/* there's no point in fw dump if the bus is dead */
1163 	if (test_bit(STATUS_TRANS_DEAD, &mvm->trans->status)) {
1164 		IWL_ERR(mvm, "Skip fw error dump since bus is dead\n");
1165 		return;
1166 	}
1167 
1168 	if (mvm->fw_dump_trig &&
1169 	    mvm->fw_dump_trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
1170 		monitor_dump_only = true;
1171 
1172 	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
1173 	if (!fw_error_dump)
1174 		return;
1175 
1176 	/* SRAM - include stack CCM if driver knows the values for it */
1177 	if (!mvm->cfg->dccm_offset || !mvm->cfg->dccm_len) {
1178 		const struct fw_img *img;
1179 
1180 		img = &mvm->fw->img[mvm->cur_ucode];
1181 		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
1182 		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
1183 	} else {
1184 		sram_ofs = mvm->cfg->dccm_offset;
1185 		sram_len = mvm->cfg->dccm_len;
1186 	}
1187 
1188 	/* reading RXF/TXF sizes */
1189 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status)) {
1190 		struct iwl_mvm_shared_mem_cfg *mem_cfg = &mvm->shared_mem_cfg;
1191 		int i;
1192 
1193 		fifo_data_len = 0;
1194 
1195 		/* Count RXF size */
1196 		for (i = 0; i < ARRAY_SIZE(mem_cfg->rxfifo_size); i++) {
1197 			if (!mem_cfg->rxfifo_size[i])
1198 				continue;
1199 
1200 			/* Add header info */
1201 			fifo_data_len += mem_cfg->rxfifo_size[i] +
1202 					 sizeof(*dump_data) +
1203 					 sizeof(struct iwl_fw_error_dump_fifo);
1204 		}
1205 
1206 		for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
1207 			if (!mem_cfg->txfifo_size[i])
1208 				continue;
1209 
1210 			/* Add header info */
1211 			fifo_data_len += mem_cfg->txfifo_size[i] +
1212 					 sizeof(*dump_data) +
1213 					 sizeof(struct iwl_fw_error_dump_fifo);
1214 		}
1215 	}
1216 
1217 	file_len = sizeof(*dump_file) +
1218 		   sizeof(*dump_data) * 2 +
1219 		   sram_len + sizeof(*dump_mem) +
1220 		   fifo_data_len +
1221 		   sizeof(*dump_info);
1222 
1223 	/* Make room for the SMEM, if it exists */
1224 	if (smem_len)
1225 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
1226 
1227 	/* Make room for the secondary SRAM, if it exists */
1228 	if (sram2_len)
1229 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
1230 
1231 	/* Make room for fw's virtual image pages, if it exists */
1232 	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
1233 		file_len += mvm->num_of_paging_blk *
1234 			(sizeof(*dump_data) +
1235 			 sizeof(struct iwl_fw_error_dump_paging) +
1236 			 PAGING_BLOCK_SIZE);
1237 
1238 	/* If we only want a monitor dump, reset the file length */
1239 	if (monitor_dump_only) {
1240 		file_len = sizeof(*dump_file) + sizeof(*dump_data) +
1241 			   sizeof(*dump_info);
1242 	}
1243 
1244 	/*
1245 	 * In 8000 HW family B-step include the ICCM (which resides separately)
1246 	 */
1247 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1248 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP)
1249 		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
1250 			    IWL8260_ICCM_LEN;
1251 
1252 	if (mvm->fw_dump_desc)
1253 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
1254 			    mvm->fw_dump_desc->len;
1255 
1256 	dump_file = vzalloc(file_len);
1257 	if (!dump_file) {
1258 		kfree(fw_error_dump);
1259 		iwl_mvm_free_fw_dump_desc(mvm);
1260 		return;
1261 	}
1262 
1263 	fw_error_dump->op_mode_ptr = dump_file;
1264 
1265 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
1266 	dump_data = (void *)dump_file->data;
1267 
1268 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
1269 	dump_data->len = cpu_to_le32(sizeof(*dump_info));
1270 	dump_info = (void *) dump_data->data;
1271 	dump_info->device_family =
1272 		mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
1273 			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
1274 			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
1275 	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(mvm->trans->hw_rev));
1276 	memcpy(dump_info->fw_human_readable, mvm->fw->human_readable,
1277 	       sizeof(dump_info->fw_human_readable));
1278 	strncpy(dump_info->dev_human_readable, mvm->cfg->name,
1279 		sizeof(dump_info->dev_human_readable));
1280 	strncpy(dump_info->bus_human_readable, mvm->dev->bus->name,
1281 		sizeof(dump_info->bus_human_readable));
1282 
1283 	dump_data = iwl_fw_error_next_data(dump_data);
1284 	/* We only dump the FIFOs if the FW is in error state */
1285 	if (test_bit(STATUS_FW_ERROR, &mvm->trans->status))
1286 		iwl_mvm_dump_fifos(mvm, &dump_data);
1287 
1288 	if (mvm->fw_dump_desc) {
1289 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
1290 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
1291 					     mvm->fw_dump_desc->len);
1292 		dump_trig = (void *)dump_data->data;
1293 		memcpy(dump_trig, &mvm->fw_dump_desc->trig_desc,
1294 		       sizeof(*dump_trig) + mvm->fw_dump_desc->len);
1295 
1296 		/* now we can free this copy */
1297 		iwl_mvm_free_fw_dump_desc(mvm);
1298 		dump_data = iwl_fw_error_next_data(dump_data);
1299 	}
1300 
1301 	/* In case we only want a monitor dump, skip to dumping transport data */
1302 	if (monitor_dump_only)
1303 		goto dump_trans_data;
1304 
1305 	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1306 	dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
1307 	dump_mem = (void *)dump_data->data;
1308 	dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1309 	dump_mem->offset = cpu_to_le32(sram_ofs);
1310 	iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
1311 				 sram_len);
1312 
1313 	if (smem_len) {
1314 		dump_data = iwl_fw_error_next_data(dump_data);
1315 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1316 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
1317 		dump_mem = (void *)dump_data->data;
1318 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
1319 		dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
1320 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
1321 					 dump_mem->data, smem_len);
1322 	}
1323 
1324 	if (sram2_len) {
1325 		dump_data = iwl_fw_error_next_data(dump_data);
1326 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1327 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
1328 		dump_mem = (void *)dump_data->data;
1329 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1330 		dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
1331 		iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
1332 					 dump_mem->data, sram2_len);
1333 	}
1334 
1335 	if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
1336 	    CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
1337 		dump_data = iwl_fw_error_next_data(dump_data);
1338 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
1339 		dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
1340 					     sizeof(*dump_mem));
1341 		dump_mem = (void *)dump_data->data;
1342 		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
1343 		dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
1344 		iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
1345 					 dump_mem->data, IWL8260_ICCM_LEN);
1346 	}
1347 
1348 	/* Dump fw's virtual image */
1349 	if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
1350 		u32 i;
1351 
1352 		for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
1353 			struct iwl_fw_error_dump_paging *paging;
1354 			struct page *pages =
1355 				mvm->fw_paging_db[i].fw_paging_block;
1356 
1357 			dump_data = iwl_fw_error_next_data(dump_data);
1358 			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
1359 			dump_data->len = cpu_to_le32(sizeof(*paging) +
1360 						     PAGING_BLOCK_SIZE);
1361 			paging = (void *)dump_data->data;
1362 			paging->index = cpu_to_le32(i);
1363 			memcpy(paging->data, page_address(pages),
1364 			       PAGING_BLOCK_SIZE);
1365 		}
1366 	}
1367 
1368 dump_trans_data:
1369 	fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
1370 						       mvm->fw_dump_trig);
1371 	fw_error_dump->op_mode_len = file_len;
1372 	if (fw_error_dump->trans_ptr)
1373 		file_len += fw_error_dump->trans_ptr->len;
1374 	dump_file->file_len = cpu_to_le32(file_len);
1375 
1376 	dev_coredumpm(mvm->trans->dev, THIS_MODULE, fw_error_dump, 0,
1377 		      GFP_KERNEL, iwl_mvm_read_coredump, iwl_mvm_free_coredump);
1378 
1379 	mvm->fw_dump_trig = NULL;
1380 	clear_bit(IWL_MVM_STATUS_DUMPING_FW_LOG, &mvm->status);
1381 }
1382 
1383 struct iwl_mvm_dump_desc iwl_mvm_dump_desc_assert = {
1384 	.trig_desc = {
1385 		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
1386 	},
1387 };
1388 
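/*
 * Bring the driver back to a clean state before re-initializing the firmware
 * after a HW restart: collect a coredump (unless this is a D3->D0 reconfig),
 * drop stale references, stop the device and reset all cached state.
 */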
1389 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1390 {
1391 	/* Clear the D3 reconfig flag; we only need it to avoid dumping a
1392 	 * firmware coredump on reconfiguration, since we shouldn't do that
1393 	 * on the D3->D0 transition.
1394 	 */
1395 	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1396 		mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1397 		iwl_mvm_fw_error_dump(mvm);
1398 	}
1399 
1400 	/* cleanup all stale references (scan, roc), but keep the
1401 	 * ucode_down ref until reconfig is complete
1402 	 */
1403 	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1404 
1405 	iwl_trans_stop_device(mvm->trans);
1406 
1407 	mvm->scan_status = 0;
1408 	mvm->ps_disabled = false;
1409 	mvm->calibrating = false;
1410 
1411 	/* just in case one was running */
1412 	ieee80211_remain_on_channel_expired(mvm->hw);
1413 
1414 	/*
1415 	 * cleanup all interfaces, even inactive ones, as some might have
1416 	 * gone down during the HW restart
1417 	 */
1418 	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1419 
1420 	mvm->p2p_device_vif = NULL;
1421 	mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1422 
1423 	iwl_mvm_reset_phy_ctxts(mvm);
1424 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1425 	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1426 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1427 	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
1428 	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1429 	memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
1430 	memset(&mvm->bt_ack_kill_msk, 0, sizeof(mvm->bt_ack_kill_msk));
1431 	memset(&mvm->bt_cts_kill_msk, 0, sizeof(mvm->bt_cts_kill_msk));
1432 
1433 	ieee80211_wake_queues(mvm->hw);
1434 
1435 	/* clear any stale d0i3 state */
1436 	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1437 
1438 	mvm->vif_count = 0;
1439 	mvm->rx_ba_sessions = 0;
1440 	mvm->fw_dbg_conf = FW_DBG_INVALID;
1441 
1442 	/* keep statistics ticking */
1443 	iwl_mvm_accu_radio_stats(mvm);
1444 }
1445 
1446 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1447 {
1448 	int ret;
1449 
1450 	lockdep_assert_held(&mvm->mutex);
1451 
1452 	/* Clean up some internal and mac80211 state on restart */
1453 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1454 		iwl_mvm_restart_cleanup(mvm);
1455 
1456 	ret = iwl_mvm_up(mvm);
1457 
1458 	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1459 		/* Something went wrong - we need to finish some cleanup
1460 		 * that normally iwl_mvm_mac_restart_complete() below
1461 		 * would do.
1462 		 */
1463 		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1464 		iwl_mvm_d0i3_enable_tx(mvm, NULL);
1465 	}
1466 
1467 	return ret;
1468 }
1469 
1470 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1471 {
1472 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1473 	int ret;
1474 
1475 	/* Some hw restart cleanups must not hold the mutex */
1476 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1477 		/*
1478 		 * Make sure we are out of d0i3. This is needed
1479 		 * to make sure the reference accounting is correct
1480 		 * (and there is no stale d0i3_exit_work).
1481 		 */
1482 		wait_event_timeout(mvm->d0i3_exit_waitq,
1483 				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
1484 					     &mvm->status),
1485 				   HZ);
1486 	}
1487 
1488 	mutex_lock(&mvm->mutex);
1489 	ret = __iwl_mvm_mac_start(mvm);
1490 	mutex_unlock(&mvm->mutex);
1491 
1492 	return ret;
1493 }
1494 
1495 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1496 {
1497 	int ret;
1498 
1499 	mutex_lock(&mvm->mutex);
1500 
1501 	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1502 	iwl_mvm_d0i3_enable_tx(mvm, NULL);
1503 	ret = iwl_mvm_update_quotas(mvm, true, NULL);
1504 	if (ret)
1505 		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1506 			ret);
1507 
1508 	/* allow transport/FW low power modes */
1509 	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1510 
1511 	/*
1512 	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1513 	 * of packets the FW sent out, so we must reconnect.
1514 	 */
1515 	iwl_mvm_teardown_tdls_peers(mvm);
1516 
1517 	mutex_unlock(&mvm->mutex);
1518 }
1519 
1520 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1521 {
1522 	if (!iwl_mvm_is_d0i3_supported(mvm))
1523 		return;
1524 
1525 	if (mvm->trans->d0i3_mode == IWL_D0I3_MODE_ON_SUSPEND)
1526 		if (!wait_event_timeout(mvm->d0i3_exit_waitq,
1527 					!test_bit(IWL_MVM_STATUS_IN_D0I3,
1528 						  &mvm->status),
1529 					HZ))
1530 			WARN_ONCE(1, "D0i3 exit on resume timed out\n");
1531 }
1532 
1533 static void
1534 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1535 			      enum ieee80211_reconfig_type reconfig_type)
1536 {
1537 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1538 
1539 	switch (reconfig_type) {
1540 	case IEEE80211_RECONFIG_TYPE_RESTART:
1541 		iwl_mvm_restart_complete(mvm);
1542 		break;
1543 	case IEEE80211_RECONFIG_TYPE_SUSPEND:
1544 		iwl_mvm_resume_complete(mvm);
1545 		break;
1546 	}
1547 }
1548 
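/*
 * Stop the device with mvm->mutex held: take the UCODE_DOWN reference if
 * needed, flush pending work, stop the transport and clean up driver state.
 */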
1549 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1550 {
1551 	lockdep_assert_held(&mvm->mutex);
1552 
1553 	/* The firmware counters are obviously reset now, but we shouldn't
1554 	 * track them only partially, so also clear the fw_reset_accu counters.
1555 	 */
1556 	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1557 
1558 	/*
1559 	 * Disallow low power states when the FW is down by taking
1560 	 * the UCODE_DOWN ref. in case of ongoing hw restart the
1561 	 * ref is already taken, so don't take it again.
1562 	 */
1563 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1564 		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1565 
1566 	/* async_handlers_wk is now blocked */
1567 
1568 	/*
1569 	 * The work item could be running or queued if the
1570 	 * ROC time event stops just as we get here.
1571 	 */
1572 	flush_work(&mvm->roc_done_wk);
1573 
1574 	iwl_trans_stop_device(mvm->trans);
1575 
1576 	iwl_mvm_async_handlers_purge(mvm);
1577 	/* async_handlers_list is empty and will stay empty: HW is stopped */
1578 
1579 	/* the fw is stopped, the aux sta is dead: clean up driver state */
1580 	iwl_mvm_del_aux_sta(mvm);
1581 
1582 	iwl_free_fw_paging(mvm);
1583 
1584 	/*
1585 	 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1586 	 * won't be called in this case).
1587 	 * But make sure to cleanup interfaces that have gone down before/during
1588 	 * HW restart was requested.
1589 	 */
1590 	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1591 		ieee80211_iterate_interfaces(mvm->hw, 0,
1592 					     iwl_mvm_cleanup_iterator, mvm);
1593 
1594 	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
1595 	 * make sure there's nothing left there and warn if any is found.
1596 	 */
1597 	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1598 		int i;
1599 
1600 		for (i = 0; i < mvm->max_scans; i++) {
1601 			if (WARN_ONCE(mvm->scan_uid_status[i],
1602 				      "UMAC scan UID %d status was not cleaned\n",
1603 				      i))
1604 				mvm->scan_uid_status[i] = 0;
1605 		}
1606 	}
1607 
1608 	mvm->ucode_loaded = false;
1609 }
1610 
1611 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1612 {
1613 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1614 
1615 	flush_work(&mvm->d0i3_exit_work);
1616 	flush_work(&mvm->async_handlers_wk);
1617 	cancel_delayed_work_sync(&mvm->fw_dump_wk);
1618 	iwl_mvm_free_fw_dump_desc(mvm);
1619 
1620 	mutex_lock(&mvm->mutex);
1621 	__iwl_mvm_mac_stop(mvm);
1622 	mutex_unlock(&mvm->mutex);
1623 
1624 	/*
1625 	 * The worker might have been waiting for the mutex, let it run and
1626 	 * discover that its list is now empty.
1627 	 */
1628 	cancel_work_sync(&mvm->async_handlers_wk);
1629 }
1630 
1631 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1632 {
1633 	u16 i;
1634 
1635 	lockdep_assert_held(&mvm->mutex);
1636 
1637 	for (i = 0; i < NUM_PHY_CTX; i++)
1638 		if (!mvm->phy_ctxts[i].ref)
1639 			return &mvm->phy_ctxts[i];
1640 
1641 	IWL_ERR(mvm, "No available PHY context\n");
1642 	return NULL;
1643 }
1644 
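/*
 * Note on iwl_mvm_set_tx_power() below: the "8 * tx_power" scaling suggests
 * the firmware takes pwr_restriction in 1/8 dBm steps (an assumption about
 * the firmware interface, not stated in this file).  For example:
 *
 *	tx_power = 20 dBm		->  pwr_restriction = cpu_to_le16(160)
 *	tx_power = IWL_DEFAULT_MAX_TX_POWER -> IWL_DEV_MAX_TX_POWER (no cap)
 *
 * When the firmware does not advertise IWL_UCODE_TLV_API_TX_POWER_CHAIN,
 * only sizeof(cmd.v2) bytes are sent, i.e. older firmware only receives the
 * v2 layout of the command.
 */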
1645 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1646 				s16 tx_power)
1647 {
1648 	struct iwl_dev_tx_power_cmd cmd = {
1649 		.v2.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1650 		.v2.mac_context_id =
1651 			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1652 		.v2.pwr_restriction = cpu_to_le16(8 * tx_power),
1653 	};
1654 	int len = sizeof(cmd);
1655 
1656 	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1657 		cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1658 
1659 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_CHAIN))
1660 		len = sizeof(cmd.v2);
1661 
1662 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1663 }
1664 
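/*
 * Note: iwl_mvm_mac_add_interface() below unwinds partial setup through a
 * chain of error labels, each undoing one successful step in reverse order:
 *
 *	out_unbind	-> iwl_mvm_binding_remove_vif()
 *	out_unref_phy	-> iwl_mvm_phy_ctxt_unref()
 *	out_free_bf	-> drop the beacon-filtering vif and driver flags
 *	out_remove_mac	-> clear phy_ctxt, iwl_mvm_mac_ctxt_remove()
 *	out_release	-> vif_count--, iwl_mvm_mac_ctxt_release()
 *	out_unlock	-> mutex_unlock() and drop the ADD_IF reference
 */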
1665 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1666 				     struct ieee80211_vif *vif)
1667 {
1668 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1669 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1670 	int ret;
1671 
1672 	mvmvif->mvm = mvm;
1673 
1674 	/*
1675 	 * make sure D0i3 exit is completed, otherwise a target access
1676 	 * during tx queue configuration could be done when still in
1677 	 * D0i3 state.
1678 	 */
1679 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1680 	if (ret)
1681 		return ret;
1682 
1683 	/*
1684 	 * Not much to do here. The stack will not allow interface
1685 	 * types or combinations that we didn't advertise, so we
1686 	 * don't really have to check the types.
1687 	 */
1688 
1689 	mutex_lock(&mvm->mutex);
1690 
1691 	/* make sure that beacon statistics don't go backwards with FW reset */
1692 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1693 		mvmvif->beacon_stats.accu_num_beacons +=
1694 			mvmvif->beacon_stats.num_beacons;
1695 
1696 	/* Allocate resources for the MAC context, and add it to the fw  */
1697 	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1698 	if (ret)
1699 		goto out_unlock;
1700 
1701 	/* Counting the number of interfaces is needed for legacy PM */
1702 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1703 		mvm->vif_count++;
1704 
1705 	/*
1706 	 * The AP binding flow can be done only after the beacon
1707 	 * template is configured (which happens only in the mac80211
1708 	 * start_ap() flow), and adding the broadcast station can happen
1709 	 * only after the binding.
1710 	 * In addition, since modifying the MAC before adding a bcast
1711 	 * station is not allowed by the FW, delay adding the MAC context to
1712 	 * the point where we can also add the bcast station.
1713 	 * In short: there's not much we can do at this point, other than
1714 	 * allocating resources :)
1715 	 */
1716 	if (vif->type == NL80211_IFTYPE_AP ||
1717 	    vif->type == NL80211_IFTYPE_ADHOC) {
1718 		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1719 		if (ret) {
1720 			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1721 			goto out_release;
1722 		}
1723 
1724 		iwl_mvm_vif_dbgfs_register(mvm, vif);
1725 		goto out_unlock;
1726 	}
1727 
1728 	mvmvif->features |= hw->netdev_features;
1729 
1730 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1731 	if (ret)
1732 		goto out_release;
1733 
1734 	ret = iwl_mvm_power_update_mac(mvm);
1735 	if (ret)
1736 		goto out_remove_mac;
1737 
1738 	/* beacon filtering */
1739 	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1740 	if (ret)
1741 		goto out_remove_mac;
1742 
1743 	if (!mvm->bf_allowed_vif &&
1744 	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1745 		mvm->bf_allowed_vif = mvmvif;
1746 		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1747 				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1748 	}
1749 
1750 	/*
1751 	 * P2P_DEVICE interface does not have a channel context assigned to it,
1752 	 * so a dedicated PHY context is allocated to it and the corresponding
1753 	 * MAC context is bound to it at this stage.
1754 	 */
1755 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1756 
1757 		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1758 		if (!mvmvif->phy_ctxt) {
1759 			ret = -ENOSPC;
1760 			goto out_free_bf;
1761 		}
1762 
1763 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1764 		ret = iwl_mvm_binding_add_vif(mvm, vif);
1765 		if (ret)
1766 			goto out_unref_phy;
1767 
1768 		ret = iwl_mvm_add_bcast_sta(mvm, vif);
1769 		if (ret)
1770 			goto out_unbind;
1771 
1772 		/* Save a pointer to p2p device vif, so it can later be used to
1773 		 * update the p2p device MAC when a GO is started/stopped */
1774 		mvm->p2p_device_vif = vif;
1775 	}
1776 
1777 	iwl_mvm_vif_dbgfs_register(mvm, vif);
1778 	goto out_unlock;
1779 
1780  out_unbind:
1781 	iwl_mvm_binding_remove_vif(mvm, vif);
1782  out_unref_phy:
1783 	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1784  out_free_bf:
1785 	if (mvm->bf_allowed_vif == mvmvif) {
1786 		mvm->bf_allowed_vif = NULL;
1787 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1788 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1789 	}
1790  out_remove_mac:
1791 	mvmvif->phy_ctxt = NULL;
1792 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1793  out_release:
1794 	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1795 		mvm->vif_count--;
1796 
1797 	iwl_mvm_mac_ctxt_release(mvm, vif);
1798  out_unlock:
1799 	mutex_unlock(&mvm->mutex);
1800 
1801 	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1802 
1803 	return ret;
1804 }
1805 
1806 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1807 					struct ieee80211_vif *vif)
1808 {
1809 	u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1810 
1811 	if (tfd_msk) {
1812 		/*
1813 		 * mac80211 first removes all the stations of the vif and
1814 		 * then removes the vif. When it removes a station it also
1815 		 * flushes the AMPDU session. So by now, all the AMPDU sessions
1816 		 * of all the stations of this vif are closed, and the queues
1817 		 * of these AMPDU sessions are properly closed.
1818 		 * We still need to take care of the shared queues of the vif.
1819 		 * Flush them here.
1820 		 */
1821 		mutex_lock(&mvm->mutex);
1822 		iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1823 		mutex_unlock(&mvm->mutex);
1824 
1825 		/*
1826 		 * There are transports that buffer a few frames in the host.
1827 		 * For these, the flush above isn't enough since while we were
1828 		 * flushing, the transport might have sent more frames to the
1829 		 * device. To solve this, wait here until the transport is
1830 		 * empty. Technically, this could have replaced the flush
1831 		 * above, but flush is much faster than draining. So flush
1832 		 * first, and drain to make sure we have no frames in the
1833 		 * transport anymore.
1834 		 * If a station still had frames on the shared queues, it is
1835 		 * already marked as draining, so to complete the draining, we
1836 		 * just need to wait until the transport is empty.
1837 		 */
1838 		iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1839 	}
1840 
1841 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1842 		/*
1843 		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1844 		 * We assume here that all the packets sent to the OFFCHANNEL
1845 		 * queue are sent in ROC session.
1846 		 */
1847 		flush_work(&mvm->roc_done_wk);
1848 	} else {
1849 		/*
1850 		 * By now, all the AC queues are empty. The AGG queues are
1851 		 * empty too. We already got all the Tx responses for all the
1852 		 * packets in the queues. The drain work can have been
1853 		 * triggered. Flush it.
1854 		 */
1855 		flush_work(&mvm->sta_drained_wk);
1856 	}
1857 }
1858 
1859 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1860 					 struct ieee80211_vif *vif)
1861 {
1862 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1863 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1864 
1865 	iwl_mvm_prepare_mac_removal(mvm, vif);
1866 
1867 	mutex_lock(&mvm->mutex);
1868 
1869 	if (mvm->bf_allowed_vif == mvmvif) {
1870 		mvm->bf_allowed_vif = NULL;
1871 		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1872 				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1873 	}
1874 
1875 	iwl_mvm_vif_dbgfs_clean(mvm, vif);
1876 
1877 	/*
1878 	 * For AP/GO interfaces, the teardown of the resources allocated to the
1879 	 * interface is handled as part of the stop_ap flow.
1880 	 */
1881 	if (vif->type == NL80211_IFTYPE_AP ||
1882 	    vif->type == NL80211_IFTYPE_ADHOC) {
1883 #ifdef CONFIG_NL80211_TESTMODE
1884 		if (vif == mvm->noa_vif) {
1885 			mvm->noa_vif = NULL;
1886 			mvm->noa_duration = 0;
1887 		}
1888 #endif
1889 		iwl_mvm_dealloc_bcast_sta(mvm, vif);
1890 		goto out_release;
1891 	}
1892 
1893 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1894 		mvm->p2p_device_vif = NULL;
1895 		iwl_mvm_rm_bcast_sta(mvm, vif);
1896 		iwl_mvm_binding_remove_vif(mvm, vif);
1897 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1898 		mvmvif->phy_ctxt = NULL;
1899 	}
1900 
1901 	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1902 		mvm->vif_count--;
1903 
1904 	iwl_mvm_power_update_mac(mvm);
1905 	iwl_mvm_mac_ctxt_remove(mvm, vif);
1906 
1907 out_release:
1908 	iwl_mvm_mac_ctxt_release(mvm, vif);
1909 	mutex_unlock(&mvm->mutex);
1910 }
1911 
1912 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1913 {
1914 	return 0;
1915 }
1916 
1917 struct iwl_mvm_mc_iter_data {
1918 	struct iwl_mvm *mvm;
1919 	int port_id;
1920 };
1921 
1922 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1923 				      struct ieee80211_vif *vif)
1924 {
1925 	struct iwl_mvm_mc_iter_data *data = _data;
1926 	struct iwl_mvm *mvm = data->mvm;
1927 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1928 	struct iwl_host_cmd hcmd = {
1929 		.id = MCAST_FILTER_CMD,
1930 		.flags = CMD_ASYNC,
1931 		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1932 	};
1933 	int ret, len;
1934 
1935 	/* if we don't have free ports, mcast frames will be dropped */
1936 	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1937 		return;
1938 
1939 	if (vif->type != NL80211_IFTYPE_STATION ||
1940 	    !vif->bss_conf.assoc)
1941 		return;
1942 
1943 	cmd->port_id = data->port_id++;
1944 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1945 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1946 
1947 	hcmd.len[0] = len;
1948 	hcmd.data[0] = cmd;
1949 
1950 	ret = iwl_mvm_send_cmd(mvm, &hcmd);
1951 	if (ret)
1952 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1953 }
1954 
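/*
 * Note: the ECHO_CMD sent at the end of the function below acts as a
 * barrier.  The MCAST_FILTER_CMDs queued by the iterator are CMD_ASYNC,
 * and assuming the firmware consumes commands in order (consistent with the
 * in-function comment), a synchronous echo cannot complete until all of
 * them have been processed, which throttles how fast userspace can
 * retrigger this path.
 */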
1955 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1956 {
1957 	struct iwl_mvm_mc_iter_data iter_data = {
1958 		.mvm = mvm,
1959 	};
1960 	int ret;
1961 
1962 	lockdep_assert_held(&mvm->mutex);
1963 
1964 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1965 		return;
1966 
1967 	ieee80211_iterate_active_interfaces_atomic(
1968 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1969 		iwl_mvm_mc_iface_iterator, &iter_data);
1970 
1971 	/*
1972 	 * Send a (synchronous) echo command so that we wait for the
1973 	 * multiple asynchronous MCAST_FILTER_CMD commands sent by
1974 	 * the interface iterator. Otherwise, we might get here over
1975 	 * and over again (by userspace just sending a lot of these)
1976 	 * and the CPU can send them faster than the firmware can
1977 	 * process them.
1978 	 * Note that the CPU is still faster - but with this we'll
1979 	 * actually send fewer commands overall because the CPU will
1980 	 * not schedule the work in mac80211 as frequently if it's
1981 	 * still running when rescheduled (possibly multiple times).
1982 	 */
1983 	ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
1984 	if (ret)
1985 		IWL_ERR(mvm, "Failed to synchronize multicast groups update\n");
1986 }
1987 
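/*
 * Note: mac80211 hands the u64 returned by prepare_multicast() back to
 * configure_filter() as its 'multicast' argument, so the command allocated
 * below round-trips as an opaque cookie.  The (u64)(unsigned long) cast here
 * and the matching (void *)(unsigned long) cast in iwl_mvm_configure_filter()
 * keep that round trip correct on both 32-bit and 64-bit kernels.
 */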
1988 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1989 				     struct netdev_hw_addr_list *mc_list)
1990 {
1991 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1992 	struct iwl_mcast_filter_cmd *cmd;
1993 	struct netdev_hw_addr *addr;
1994 	int addr_count;
1995 	bool pass_all;
1996 	int len;
1997 
1998 	addr_count = netdev_hw_addr_list_count(mc_list);
1999 	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
2000 		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
2001 	if (pass_all)
2002 		addr_count = 0;
2003 
2004 	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
2005 	cmd = kzalloc(len, GFP_ATOMIC);
2006 	if (!cmd)
2007 		return 0;
2008 
2009 	if (pass_all) {
2010 		cmd->pass_all = 1;
2011 		return (u64)(unsigned long)cmd;
2012 	}
2013 
2014 	netdev_hw_addr_list_for_each(addr, mc_list) {
2015 		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
2016 				   cmd->count, addr->addr);
2017 		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
2018 		       addr->addr, ETH_ALEN);
2019 		cmd->count++;
2020 	}
2021 
2022 	return (u64)(unsigned long)cmd;
2023 }
2024 
2025 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
2026 				     unsigned int changed_flags,
2027 				     unsigned int *total_flags,
2028 				     u64 multicast)
2029 {
2030 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2031 	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
2032 
2033 	mutex_lock(&mvm->mutex);
2034 
2035 	/* replace previous configuration */
2036 	kfree(mvm->mcast_filter_cmd);
2037 	mvm->mcast_filter_cmd = cmd;
2038 
2039 	if (!cmd)
2040 		goto out;
2041 
2042 	iwl_mvm_recalc_multicast(mvm);
2043 out:
2044 	mutex_unlock(&mvm->mutex);
2045 	*total_flags = 0;
2046 }
2047 
2048 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
2049 					struct ieee80211_vif *vif,
2050 					unsigned int filter_flags,
2051 					unsigned int changed_flags)
2052 {
2053 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2054 
2055 	/* We only support filtering of probe requests */
2056 	if (!(changed_flags & FIF_PROBE_REQ))
2057 		return;
2058 
2059 	/* Supported only for p2p client interfaces */
2060 	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
2061 	    !vif->p2p)
2062 		return;
2063 
2064 	mutex_lock(&mvm->mutex);
2065 	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2066 	mutex_unlock(&mvm->mutex);
2067 }
2068 
2069 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
2070 struct iwl_bcast_iter_data {
2071 	struct iwl_mvm *mvm;
2072 	struct iwl_bcast_filter_cmd *cmd;
2073 	u8 current_filter;
2074 };
2075 
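/*
 * Note: the filter templates in mvm->bcast_filters flag attributes that need
 * per-vif values by carrying a magic constant in the otherwise unused
 * reserved1 field.  The helper below copies a template and patches those
 * attributes: the single configured ARP/IPv4 address for BC_FILTER_MAGIC_IP
 * and the last four bytes of the vif MAC address for BC_FILTER_MAGIC_MAC,
 * clearing reserved1 before the filter is handed to the firmware.
 */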
2076 static void
2077 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
2078 			 const struct iwl_fw_bcast_filter *in_filter,
2079 			 struct iwl_fw_bcast_filter *out_filter)
2080 {
2081 	struct iwl_fw_bcast_filter_attr *attr;
2082 	int i;
2083 
2084 	memcpy(out_filter, in_filter, sizeof(*out_filter));
2085 
2086 	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
2087 		attr = &out_filter->attrs[i];
2088 
2089 		if (!attr->mask)
2090 			break;
2091 
2092 		switch (attr->reserved1) {
2093 		case cpu_to_le16(BC_FILTER_MAGIC_IP):
2094 			if (vif->bss_conf.arp_addr_cnt != 1) {
2095 				attr->mask = 0;
2096 				continue;
2097 			}
2098 
2099 			attr->val = vif->bss_conf.arp_addr_list[0];
2100 			break;
2101 		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
2102 			attr->val = *(__be32 *)&vif->addr[2];
2103 			break;
2104 		default:
2105 			break;
2106 		}
2107 		attr->reserved1 = 0;
2108 		out_filter->num_attrs++;
2109 	}
2110 }
2111 
2112 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
2113 					  struct ieee80211_vif *vif)
2114 {
2115 	struct iwl_bcast_iter_data *data = _data;
2116 	struct iwl_mvm *mvm = data->mvm;
2117 	struct iwl_bcast_filter_cmd *cmd = data->cmd;
2118 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2119 	struct iwl_fw_bcast_mac *bcast_mac;
2120 	int i;
2121 
2122 	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
2123 		return;
2124 
2125 	bcast_mac = &cmd->macs[mvmvif->id];
2126 
2127 	/*
2128 	 * enable filtering only for associated stations, but not for P2P
2129 	 * Clients
2130 	 */
2131 	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
2132 	    !vif->bss_conf.assoc)
2133 		return;
2134 
2135 	bcast_mac->default_discard = 1;
2136 
2137 	/* copy all configured filters */
2138 	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
2139 		/*
2140 		 * Make sure we don't exceed our filters limit.
2141 		 * If there is still a valid filter to be configured,
2142 		 * be on the safe side and just allow bcast for this mac.
2143 		 */
2144 		if (WARN_ON_ONCE(data->current_filter >=
2145 				 ARRAY_SIZE(cmd->filters))) {
2146 			bcast_mac->default_discard = 0;
2147 			bcast_mac->attached_filters = 0;
2148 			break;
2149 		}
2150 
2151 		iwl_mvm_set_bcast_filter(vif,
2152 					 &mvm->bcast_filters[i],
2153 					 &cmd->filters[data->current_filter]);
2154 
2155 		/* skip current filter if it contains no attributes */
2156 		if (!cmd->filters[data->current_filter].num_attrs)
2157 			continue;
2158 
2159 		/* attach the filter to current mac */
2160 		bcast_mac->attached_filters |=
2161 				cpu_to_le16(BIT(data->current_filter));
2162 
2163 		data->current_filter++;
2164 	}
2165 }
2166 
2167 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
2168 				    struct iwl_bcast_filter_cmd *cmd)
2169 {
2170 	struct iwl_bcast_iter_data iter_data = {
2171 		.mvm = mvm,
2172 		.cmd = cmd,
2173 	};
2174 
2175 	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
2176 		return false;
2177 
2178 	memset(cmd, 0, sizeof(*cmd));
2179 	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
2180 	cmd->max_macs = ARRAY_SIZE(cmd->macs);
2181 
2182 #ifdef CONFIG_IWLWIFI_DEBUGFS
2183 	/* use debugfs filters/macs if override is configured */
2184 	if (mvm->dbgfs_bcast_filtering.override) {
2185 		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
2186 		       sizeof(cmd->filters));
2187 		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
2188 		       sizeof(cmd->macs));
2189 		return true;
2190 	}
2191 #endif
2192 
2193 	/* if no filters are configured, do nothing */
2194 	if (!mvm->bcast_filters)
2195 		return false;
2196 
2197 	/* configure and attach these filters for each associated sta vif */
2198 	ieee80211_iterate_active_interfaces(
2199 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
2200 		iwl_mvm_bcast_filter_iterator, &iter_data);
2201 
2202 	return true;
2203 }
2204 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2205 					  struct ieee80211_vif *vif)
2206 {
2207 	struct iwl_bcast_filter_cmd cmd;
2208 
2209 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
2210 		return 0;
2211 
2212 	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
2213 		return 0;
2214 
2215 	return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
2216 				    sizeof(cmd), &cmd);
2217 }
2218 #else
2219 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
2220 						 struct ieee80211_vif *vif)
2221 {
2222 	return 0;
2223 }
2224 #endif
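/*
 * Note: when CONFIG_IWLWIFI_BCAST_FILTERING is not set, the inline stub
 * above simply returns 0, so callers such as
 * iwl_mvm_bss_info_changed_station() can invoke
 * iwl_mvm_configure_bcast_filter() unconditionally without #ifdefs at every
 * call site.
 */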
2225 
2226 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2227 					     struct ieee80211_vif *vif,
2228 					     struct ieee80211_bss_conf *bss_conf,
2229 					     u32 changes)
2230 {
2231 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2232 	int ret;
2233 
2234 	/*
2235 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2236 	 * beacon interval, which was not known when the station interface was
2237 	 * added.
2238 	 */
2239 	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
2240 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2241 
2242 	/*
2243 	 * If we're not associated yet, take the (new) BSSID before associating
2244 	 * so the firmware knows. If we're already associated, then use the old
2245 	 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2246 	 * branch for disassociation below.
2247 	 */
2248 	if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2249 		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2250 
2251 	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2252 	if (ret)
2253 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2254 
2255 	/* after sending it once, adopt mac80211 data */
2256 	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2257 	mvmvif->associated = bss_conf->assoc;
2258 
2259 	if (changes & BSS_CHANGED_ASSOC) {
2260 		if (bss_conf->assoc) {
2261 			/* clear statistics to get clean beacon counter */
2262 			iwl_mvm_request_statistics(mvm, true);
2263 			memset(&mvmvif->beacon_stats, 0,
2264 			       sizeof(mvmvif->beacon_stats));
2265 
2266 			/* add quota for this interface */
2267 			ret = iwl_mvm_update_quotas(mvm, true, NULL);
2268 			if (ret) {
2269 				IWL_ERR(mvm, "failed to update quotas\n");
2270 				return;
2271 			}
2272 
2273 			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2274 				     &mvm->status)) {
2275 				/*
2276 				 * If we're restarting then the firmware will
2277 				 * obviously have lost synchronisation with
2278 				 * the AP. It will attempt to synchronise by
2279 				 * itself, but we can make it more reliable by
2280 				 * scheduling a session protection time event.
2281 				 *
2282 				 * The firmware needs to receive a beacon to
2283 				 * catch up with synchronisation, so use 110% of
2284 				 * the beacon interval.
2285 				 *
2286 				 * Set a large maximum delay to allow for more
2287 				 * than a single interface.
2288 				 */
2289 				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2290 				iwl_mvm_protect_session(mvm, vif, dur, dur,
2291 							5 * dur, false);
2292 			}
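			/*
			 * For example (illustrative numbers): with a beacon
			 * interval of 100 time units, dur = (11 * 100) / 10 =
			 * 110, i.e. just over one beacon period, and the
			 * maximum start delay passed to
			 * iwl_mvm_protect_session() above is 5 * dur = 550 in
			 * the same units.
			 */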
2293 
2294 			iwl_mvm_sf_update(mvm, vif, false);
2295 			iwl_mvm_power_vif_assoc(mvm, vif);
2296 			if (vif->p2p) {
2297 				iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2298 				iwl_mvm_update_smps(mvm, vif,
2299 						    IWL_MVM_SMPS_REQ_PROT,
2300 						    IEEE80211_SMPS_DYNAMIC);
2301 			}
2302 		} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
2303 			/*
2304 			 * If update fails - SF might be running in associated
2305 			 * mode while disassociated - which is forbidden.
2306 			 */
2307 			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2308 				  "Failed to update SF upon disassociation\n");
2309 
2310 			/* remove AP station now that the MAC is unassoc */
2311 			ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
2312 			if (ret)
2313 				IWL_ERR(mvm, "failed to remove AP station\n");
2314 
2315 			if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2316 				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
2317 			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
2318 			/* remove quota for this interface */
2319 			ret = iwl_mvm_update_quotas(mvm, false, NULL);
2320 			if (ret)
2321 				IWL_ERR(mvm, "failed to update quotas\n");
2322 
2323 			if (vif->p2p)
2324 				iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2325 
2326 			/* this will take the cleared BSSID from bss_conf */
2327 			ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2328 			if (ret)
2329 				IWL_ERR(mvm,
2330 					"failed to update MAC %pM (clear after unassoc)\n",
2331 					vif->addr);
2332 		}
2333 
2334 		iwl_mvm_recalc_multicast(mvm);
2335 		iwl_mvm_configure_bcast_filter(mvm, vif);
2336 
2337 		/* reset rssi values */
2338 		mvmvif->bf_data.ave_beacon_signal = 0;
2339 
2340 		iwl_mvm_bt_coex_vif_change(mvm);
2341 		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2342 				    IEEE80211_SMPS_AUTOMATIC);
2343 	} else if (changes & BSS_CHANGED_BEACON_INFO) {
2344 		/*
2345 		 * We received a beacon _after_ association so
2346 		 * remove the session protection.
2347 		 */
2348 		iwl_mvm_remove_time_event(mvm, mvmvif,
2349 					  &mvmvif->time_event_data);
2350 	}
2351 
2352 	if (changes & BSS_CHANGED_BEACON_INFO) {
2353 		iwl_mvm_sf_update(mvm, vif, false);
2354 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2355 	}
2356 
2357 	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
2358 		ret = iwl_mvm_power_update_mac(mvm);
2359 		if (ret)
2360 			IWL_ERR(mvm, "failed to update power mode\n");
2361 	}
2362 
2363 	if (changes & BSS_CHANGED_TXPOWER) {
2364 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2365 				bss_conf->txpower);
2366 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2367 	}
2368 
2369 	if (changes & BSS_CHANGED_CQM) {
2370 		IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2371 		/* reset cqm events tracking */
2372 		mvmvif->bf_data.last_cqm_event = 0;
2373 		if (mvmvif->bf_data.bf_enabled) {
2374 			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2375 			if (ret)
2376 				IWL_ERR(mvm,
2377 					"failed to update CQM thresholds\n");
2378 		}
2379 	}
2380 
2381 	if (changes & BSS_CHANGED_ARP_FILTER) {
2382 		IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2383 		iwl_mvm_configure_bcast_filter(mvm, vif);
2384 	}
2385 }
2386 
2387 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2388 				 struct ieee80211_vif *vif)
2389 {
2390 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2391 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2392 	int ret;
2393 
2394 	/*
2395 	 * iwl_mvm_mac_ctxt_add() might read directly from the device
2396 	 * (the system time), so make sure it is available.
2397 	 */
2398 	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2399 	if (ret)
2400 		return ret;
2401 
2402 	mutex_lock(&mvm->mutex);
2403 
2404 	/* Send the beacon template */
2405 	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2406 	if (ret)
2407 		goto out_unlock;
2408 
2409 	/*
2410 	 * Re-calculate the tsf id, as the master-slave relations depend on the
2411 	 * beacon interval, which was not known when the AP interface was added.
2412 	 */
2413 	if (vif->type == NL80211_IFTYPE_AP)
2414 		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2415 
2416 	mvmvif->ap_assoc_sta_count = 0;
2417 
2418 	/* Add the mac context */
2419 	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2420 	if (ret)
2421 		goto out_unlock;
2422 
2423 	/* Perform the binding */
2424 	ret = iwl_mvm_binding_add_vif(mvm, vif);
2425 	if (ret)
2426 		goto out_remove;
2427 
2428 	/* Send the bcast station. At this stage the TBTT and DTIM time events
2429 	 * are added and applied to the scheduler */
2430 	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2431 	if (ret)
2432 		goto out_unbind;
2433 
2434 	/* must be set before quota calculations */
2435 	mvmvif->ap_ibss_active = true;
2436 
2437 	/* power update needs to be done before quotas */
2438 	iwl_mvm_power_update_mac(mvm);
2439 
2440 	ret = iwl_mvm_update_quotas(mvm, false, NULL);
2441 	if (ret)
2442 		goto out_quota_failed;
2443 
2444 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2445 	if (vif->p2p && mvm->p2p_device_vif)
2446 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2447 
2448 	iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2449 
2450 	iwl_mvm_bt_coex_vif_change(mvm);
2451 
2452 	/* we don't support TDLS during DCM */
2453 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
2454 		iwl_mvm_teardown_tdls_peers(mvm);
2455 
2456 	goto out_unlock;
2457 
2458 out_quota_failed:
2459 	iwl_mvm_power_update_mac(mvm);
2460 	mvmvif->ap_ibss_active = false;
2461 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2462 out_unbind:
2463 	iwl_mvm_binding_remove_vif(mvm, vif);
2464 out_remove:
2465 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2466 out_unlock:
2467 	mutex_unlock(&mvm->mutex);
2468 	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2469 	return ret;
2470 }
2471 
2472 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2473 				 struct ieee80211_vif *vif)
2474 {
2475 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2476 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2477 
2478 	iwl_mvm_prepare_mac_removal(mvm, vif);
2479 
2480 	mutex_lock(&mvm->mutex);
2481 
2482 	/* Handle AP stop while in CSA */
2483 	if (rcu_access_pointer(mvm->csa_vif) == vif) {
2484 		iwl_mvm_remove_time_event(mvm, mvmvif,
2485 					  &mvmvif->time_event_data);
2486 		RCU_INIT_POINTER(mvm->csa_vif, NULL);
2487 		mvmvif->csa_countdown = false;
2488 	}
2489 
2490 	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2491 		RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2492 		mvm->csa_tx_block_bcn_timeout = 0;
2493 	}
2494 
2495 	mvmvif->ap_ibss_active = false;
2496 	mvm->ap_last_beacon_gp2 = 0;
2497 
2498 	iwl_mvm_bt_coex_vif_change(mvm);
2499 
2500 	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2501 
2502 	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2503 	if (vif->p2p && mvm->p2p_device_vif)
2504 		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2505 
2506 	iwl_mvm_update_quotas(mvm, false, NULL);
2507 	iwl_mvm_send_rm_bcast_sta(mvm, vif);
2508 	iwl_mvm_binding_remove_vif(mvm, vif);
2509 
2510 	iwl_mvm_power_update_mac(mvm);
2511 
2512 	iwl_mvm_mac_ctxt_remove(mvm, vif);
2513 
2514 	mutex_unlock(&mvm->mutex);
2515 }
2516 
2517 static void
2518 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2519 				 struct ieee80211_vif *vif,
2520 				 struct ieee80211_bss_conf *bss_conf,
2521 				 u32 changes)
2522 {
2523 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2524 
2525 	/* Changes will be applied when the AP/IBSS is started */
2526 	if (!mvmvif->ap_ibss_active)
2527 		return;
2528 
2529 	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2530 		       BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2531 	    iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2532 		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2533 
2534 	/* Need to send a new beacon template to the FW */
2535 	if (changes & BSS_CHANGED_BEACON &&
2536 	    iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2537 		IWL_WARN(mvm, "Failed updating beacon data\n");
2538 
2539 	if (changes & BSS_CHANGED_TXPOWER) {
2540 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2541 				bss_conf->txpower);
2542 		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2543 	}
2544 
2545 }
2546 
2547 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2548 				     struct ieee80211_vif *vif,
2549 				     struct ieee80211_bss_conf *bss_conf,
2550 				     u32 changes)
2551 {
2552 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2553 
2554 	/*
2555 	 * iwl_mvm_bss_info_changed_station() might call
2556 	 * iwl_mvm_protect_session(), which reads directly from
2557 	 * the device (the system time), so make sure it is available.
2558 	 */
2559 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2560 		return;
2561 
2562 	mutex_lock(&mvm->mutex);
2563 
2564 	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2565 		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2566 
2567 	switch (vif->type) {
2568 	case NL80211_IFTYPE_STATION:
2569 		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2570 		break;
2571 	case NL80211_IFTYPE_AP:
2572 	case NL80211_IFTYPE_ADHOC:
2573 		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2574 		break;
2575 	default:
2576 		/* shouldn't happen */
2577 		WARN_ON_ONCE(1);
2578 	}
2579 
2580 	mutex_unlock(&mvm->mutex);
2581 	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2582 }
2583 
2584 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2585 			       struct ieee80211_vif *vif,
2586 			       struct ieee80211_scan_request *hw_req)
2587 {
2588 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2589 	int ret;
2590 
2591 	if (hw_req->req.n_channels == 0 ||
2592 	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2593 		return -EINVAL;
2594 
2595 	mutex_lock(&mvm->mutex);
2596 	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2597 	mutex_unlock(&mvm->mutex);
2598 
2599 	return ret;
2600 }
2601 
2602 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2603 				       struct ieee80211_vif *vif)
2604 {
2605 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2606 
2607 	mutex_lock(&mvm->mutex);
2608 
2609 	/* Due to a race condition, it's possible that mac80211 asks
2610 	 * us to stop a hw_scan when it's already stopped.  This can
2611 	 * happen, for instance, if we stopped the scan ourselves,
2612 	 * called ieee80211_scan_completed() and the userspace called
2613 	 * cancel scan before ieee80211_scan_work() could run.
2614 	 * To handle that, simply return if the scan is not running.
2615 	 */
2616 	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2617 		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2618 
2619 	mutex_unlock(&mvm->mutex);
2620 }
2621 
2622 static void
2623 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2624 				  struct ieee80211_sta *sta, u16 tids,
2625 				  int num_frames,
2626 				  enum ieee80211_frame_release_type reason,
2627 				  bool more_data)
2628 {
2629 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2630 
2631 	/* Called when we need to transmit (a) frame(s) from mac80211 */
2632 
2633 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2634 					  tids, more_data, false);
2635 }
2636 
2637 static void
2638 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2639 				    struct ieee80211_sta *sta, u16 tids,
2640 				    int num_frames,
2641 				    enum ieee80211_frame_release_type reason,
2642 				    bool more_data)
2643 {
2644 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2645 
2646 	/* Called when we need to transmit (a) frame(s) from agg queue */
2647 
2648 	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2649 					  tids, more_data, true);
2650 }
2651 
2652 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2653 				   struct ieee80211_vif *vif,
2654 				   enum sta_notify_cmd cmd,
2655 				   struct ieee80211_sta *sta)
2656 {
2657 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2658 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2659 	unsigned long txqs = 0, tids = 0;
2660 	int tid;
2661 
2662 	spin_lock_bh(&mvmsta->lock);
2663 	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2664 		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2665 
2666 		if (tid_data->state != IWL_AGG_ON &&
2667 		    tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2668 			continue;
2669 
2670 		__set_bit(tid_data->txq_id, &txqs);
2671 
2672 		if (iwl_mvm_tid_queued(tid_data) == 0)
2673 			continue;
2674 
2675 		__set_bit(tid, &tids);
2676 	}
2677 
2678 	switch (cmd) {
2679 	case STA_NOTIFY_SLEEP:
2680 		if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2681 			ieee80211_sta_block_awake(hw, sta, true);
2682 
2683 		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2684 			ieee80211_sta_set_buffered(sta, tid, true);
2685 
2686 		if (txqs)
2687 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2688 		/*
2689 		 * The fw updates the STA to be asleep. Tx packets on the Tx
2690 		 * queues to this station will not be transmitted. The fw will
2691 		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2692 		 */
2693 		break;
2694 	case STA_NOTIFY_AWAKE:
2695 		if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2696 			break;
2697 
2698 		if (txqs)
2699 			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2700 		iwl_mvm_sta_modify_ps_wake(mvm, sta);
2701 		break;
2702 	default:
2703 		break;
2704 	}
2705 	spin_unlock_bh(&mvmsta->lock);
2706 }
2707 
2708 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2709 				       struct ieee80211_vif *vif,
2710 				       struct ieee80211_sta *sta)
2711 {
2712 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2713 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2714 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2715 
2716 	/*
2717 	 * This is called before mac80211 does RCU synchronisation,
2718 	 * so here we already invalidate our internal RCU-protected
2719 	 * station pointer. The rest of the code will thus no longer
2720 	 * be able to find the station this way, and we don't rely
2721 	 * on further RCU synchronisation after the sta_state()
2722 	 * callback deleted the station.
2723 	 */
2724 	mutex_lock(&mvm->mutex);
2725 	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2726 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2727 				   ERR_PTR(-ENOENT));
2728 
2729 	if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
2730 		mvmvif->ap_assoc_sta_count--;
2731 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2732 	}
2733 
2734 	mutex_unlock(&mvm->mutex);
2735 }
2736 
2737 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2738 				const u8 *bssid)
2739 {
2740 	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2741 		return;
2742 
2743 	if (iwlwifi_mod_params.uapsd_disable) {
2744 		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2745 		return;
2746 	}
2747 
2748 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2749 }
2750 
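/*
 * Summary of the transitions handled below (mac80211 moves a station one
 * step at a time through NOTEXIST <-> NONE <-> AUTH <-> ASSOC <-> AUTHORIZED):
 *
 *	NOTEXIST -> NONE:	sanity checks, then iwl_mvm_add_sta()
 *	NONE -> AUTH:		reset EBS state, U-APSD check
 *	AUTH -> ASSOC:		iwl_mvm_update_sta() + rate-scaling init
 *	ASSOC -> AUTHORIZED:	tear down TDLS peers in DCM, enable beacon filter
 *	AUTHORIZED -> ASSOC:	disable beacon filter
 *	ASSOC -> AUTH, AUTH -> NONE:	no firmware action needed
 *	NONE -> NOTEXIST:	iwl_mvm_rm_sta()
 *
 * Any other transition returns -EIO.  TDLS stations additionally
 * reserve/unreserve IWL_MVM_TDLS_FW_TID around add/remove.
 */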
2751 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2752 				 struct ieee80211_vif *vif,
2753 				 struct ieee80211_sta *sta,
2754 				 enum ieee80211_sta_state old_state,
2755 				 enum ieee80211_sta_state new_state)
2756 {
2757 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2758 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2759 	int ret;
2760 
2761 	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2762 			   sta->addr, old_state, new_state);
2763 
2764 	/* this would be a mac80211 bug ... but don't crash */
2765 	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2766 		return -EINVAL;
2767 
2768 	/* if a STA is being removed, reuse its ID */
2769 	flush_work(&mvm->sta_drained_wk);
2770 
2771 	mutex_lock(&mvm->mutex);
2772 	if (old_state == IEEE80211_STA_NOTEXIST &&
2773 	    new_state == IEEE80211_STA_NONE) {
2774 		/*
2775 		 * Firmware bug - it'll crash if the beacon interval is less
2776 		 * than 16. We can't avoid connecting at all, so refuse the
2777 		 * station state change, this will cause mac80211 to abandon
2778 		 * attempts to connect to this AP, and eventually wpa_s will
2779 		 * blacklist the AP...
2780 		 */
2781 		if (vif->type == NL80211_IFTYPE_STATION &&
2782 		    vif->bss_conf.beacon_int < 16) {
2783 			IWL_ERR(mvm,
2784 				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2785 				sta->addr, vif->bss_conf.beacon_int);
2786 			ret = -EINVAL;
2787 			goto out_unlock;
2788 		}
2789 
2790 		if (sta->tdls &&
2791 		    (vif->p2p ||
2792 		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
2793 						IWL_MVM_TDLS_STA_COUNT ||
2794 		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
2795 			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2796 			ret = -EBUSY;
2797 			goto out_unlock;
2798 		}
2799 
2800 		ret = iwl_mvm_add_sta(mvm, vif, sta);
2801 		if (sta->tdls && ret == 0)
2802 			iwl_mvm_recalc_tdls_state(mvm, vif, true);
2803 	} else if (old_state == IEEE80211_STA_NONE &&
2804 		   new_state == IEEE80211_STA_AUTH) {
2805 		/*
2806 		 * EBS may be disabled due to previous failures reported by FW.
2807 		 * Reset EBS status here assuming environment has been changed.
2808 		 * Reset EBS status here, assuming the environment has changed.
2809 		mvm->last_ebs_successful = true;
2810 		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2811 		ret = 0;
2812 	} else if (old_state == IEEE80211_STA_AUTH &&
2813 		   new_state == IEEE80211_STA_ASSOC) {
2814 		ret = iwl_mvm_update_sta(mvm, vif, sta);
2815 		if (ret == 0)
2816 			iwl_mvm_rs_rate_init(mvm, sta,
2817 					     mvmvif->phy_ctxt->channel->band,
2818 					     true);
2819 	} else if (old_state == IEEE80211_STA_ASSOC &&
2820 		   new_state == IEEE80211_STA_AUTHORIZED) {
2821 
2822 		/* we don't support TDLS during DCM */
2823 		if (iwl_mvm_phy_ctx_count(mvm) > 1)
2824 			iwl_mvm_teardown_tdls_peers(mvm);
2825 
2826 		/* enable beacon filtering */
2827 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2828 		ret = 0;
2829 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
2830 		   new_state == IEEE80211_STA_ASSOC) {
2831 		/* disable beacon filtering */
2832 		WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2833 		ret = 0;
2834 	} else if (old_state == IEEE80211_STA_ASSOC &&
2835 		   new_state == IEEE80211_STA_AUTH) {
2836 		ret = 0;
2837 	} else if (old_state == IEEE80211_STA_AUTH &&
2838 		   new_state == IEEE80211_STA_NONE) {
2839 		ret = 0;
2840 	} else if (old_state == IEEE80211_STA_NONE &&
2841 		   new_state == IEEE80211_STA_NOTEXIST) {
2842 		ret = iwl_mvm_rm_sta(mvm, vif, sta);
2843 		if (sta->tdls)
2844 			iwl_mvm_recalc_tdls_state(mvm, vif, false);
2845 	} else {
2846 		ret = -EIO;
2847 	}
2848  out_unlock:
2849 	mutex_unlock(&mvm->mutex);
2850 
2851 	if (sta->tdls && ret == 0) {
2852 		if (old_state == IEEE80211_STA_NOTEXIST &&
2853 		    new_state == IEEE80211_STA_NONE)
2854 			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2855 		else if (old_state == IEEE80211_STA_NONE &&
2856 			 new_state == IEEE80211_STA_NOTEXIST)
2857 			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2858 	}
2859 
2860 	return ret;
2861 }
2862 
2863 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2864 {
2865 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2866 
2867 	mvm->rts_threshold = value;
2868 
2869 	return 0;
2870 }
2871 
2872 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2873 				  struct ieee80211_vif *vif,
2874 				  struct ieee80211_sta *sta, u32 changed)
2875 {
2876 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2877 
2878 	if (vif->type == NL80211_IFTYPE_STATION &&
2879 	    changed & IEEE80211_RC_NSS_CHANGED)
2880 		iwl_mvm_sf_update(mvm, vif, false);
2881 }
2882 
2883 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2884 			       struct ieee80211_vif *vif, u16 ac,
2885 			       const struct ieee80211_tx_queue_params *params)
2886 {
2887 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2888 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2889 
2890 	mvmvif->queue_params[ac] = *params;
2891 
2892 	/*
2893 	 * No need to update right away; we'll get BSS_CHANGED_QOS.
2894 	 * The exception is the P2P_DEVICE interface, which needs an immediate update.
2895 	 */
2896 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2897 		int ret;
2898 
2899 		mutex_lock(&mvm->mutex);
2900 		ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2901 		mutex_unlock(&mvm->mutex);
2902 		return ret;
2903 	}
2904 	return 0;
2905 }
2906 
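/*
 * Note: mac80211 invokes this callback before management frames are
 * transmitted to the AP prior to association (hence the WARN_ON_ONCE on
 * bss_conf.assoc below).  The handler arms a session-protection time event
 * sized from the beacon interval (200 and 100 on top of it for the requested
 * and minimum durations), each capped via min() by the
 * IWL_MVM_TE_SESSION_PROTECTION_{MAX,MIN}_TIME_MS constants, so the firmware
 * stays on the operating channel long enough to hear the AP's response.
 */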
2907 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2908 				      struct ieee80211_vif *vif)
2909 {
2910 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2911 	u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
2912 			   200 + vif->bss_conf.beacon_int);
2913 	u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
2914 			       100 + vif->bss_conf.beacon_int);
2915 
2916 	if (WARN_ON_ONCE(vif->bss_conf.assoc))
2917 		return;
2918 
2919 	/*
2920 	 * iwl_mvm_protect_session() reads directly from the device
2921 	 * (the system time), so make sure it is available.
2922 	 */
2923 	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2924 		return;
2925 
2926 	mutex_lock(&mvm->mutex);
2927 	/* Try really hard to protect the session and hear a beacon */
2928 	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2929 	mutex_unlock(&mvm->mutex);
2930 
2931 	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2932 }
2933 
2934 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2935 					struct ieee80211_vif *vif,
2936 					struct cfg80211_sched_scan_request *req,
2937 					struct ieee80211_scan_ies *ies)
2938 {
2939 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2940 
2941 	int ret;
2942 
2943 	mutex_lock(&mvm->mutex);
2944 
2945 	if (!vif->bss_conf.idle) {
2946 		ret = -EBUSY;
2947 		goto out;
2948 	}
2949 
2950 	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2951 
2952 out:
2953 	mutex_unlock(&mvm->mutex);
2954 	return ret;
2955 }
2956 
2957 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2958 				       struct ieee80211_vif *vif)
2959 {
2960 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2961 	int ret;
2962 
2963 	mutex_lock(&mvm->mutex);
2964 
2965 	/* Due to a race condition, it's possible that mac80211 asks
2966 	 * us to stop a sched_scan when it's already stopped.  This
2967 	 * can happen, for instance, if we stopped the scan ourselves,
2968 	 * called ieee80211_sched_scan_stopped() and the userspace called
2969 	 * stop sched scan before ieee80211_sched_scan_stopped_work()
2970 	 * could run.  To handle this, simply return if the scan is
2971 	 * not running.
2972 	 */
2973 	if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2974 		mutex_unlock(&mvm->mutex);
2975 		return 0;
2976 	}
2977 
2978 	ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2979 	mutex_unlock(&mvm->mutex);
2980 	iwl_mvm_wait_for_async_handlers(mvm);
2981 
2982 	return ret;
2983 }
2984 
2985 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2986 			       enum set_key_cmd cmd,
2987 			       struct ieee80211_vif *vif,
2988 			       struct ieee80211_sta *sta,
2989 			       struct ieee80211_key_conf *key)
2990 {
2991 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2992 	int ret;
2993 	u8 key_offset;
2994 
2995 	if (iwlwifi_mod_params.sw_crypto) {
2996 		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2997 		return -EOPNOTSUPP;
2998 	}
2999 
3000 	switch (key->cipher) {
3001 	case WLAN_CIPHER_SUITE_TKIP:
3002 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3003 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3004 		break;
3005 	case WLAN_CIPHER_SUITE_CCMP:
3006 		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3007 		break;
3008 	case WLAN_CIPHER_SUITE_AES_CMAC:
3009 		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
3010 		break;
3011 	case WLAN_CIPHER_SUITE_WEP40:
3012 	case WLAN_CIPHER_SUITE_WEP104:
3013 		/* For non-client mode, only use WEP keys for TX as we probably
3014 		 * don't have a station yet anyway and would then have to keep
3015 		 * track of the keys, linking them to each of the clients/peers
3016 		 * as they appear. For now, don't do that; WEP offload doesn't
3017 		 * really matter much for performance, but we need it for some
3018 		 * other offload features in client mode.
3019 		 */
3020 		if (vif->type != NL80211_IFTYPE_STATION)
3021 			return 0;
3022 		break;
3023 	default:
3024 		/* currently FW supports only one optional cipher scheme */
3025 		if (hw->n_cipher_schemes &&
3026 		    hw->cipher_schemes->cipher == key->cipher)
3027 			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3028 		else
3029 			return -EOPNOTSUPP;
3030 	}
3031 
3032 	mutex_lock(&mvm->mutex);
3033 
3034 	switch (cmd) {
3035 	case SET_KEY:
3036 		if ((vif->type == NL80211_IFTYPE_ADHOC ||
3037 		     vif->type == NL80211_IFTYPE_AP) && !sta) {
3038 			/*
3039 			 * GTK on AP interface is a TX-only key, return 0;
3040 			 * on IBSS they're per-station and because we're lazy
3041 			 * we don't support them for RX, so do the same.
3042 			 */
3043 			ret = 0;
3044 			key->hw_key_idx = STA_KEY_IDX_INVALID;
3045 			break;
3046 		}
3047 
3048 		/* During FW restart, in order to restore the state as it was,
3049 		 * don't try to reprogram keys we previously failed for.
3050 		 */
3051 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3052 		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
3053 			IWL_DEBUG_MAC80211(mvm,
3054 					   "skip invalid idx key programming during restart\n");
3055 			ret = 0;
3056 			break;
3057 		}
3058 
3059 		/* in HW restart reuse the index, otherwise request a new one */
3060 		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3061 			key_offset = key->hw_key_idx;
3062 		else
3063 			key_offset = STA_KEY_IDX_INVALID;
3064 
3065 		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3066 		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3067 		if (ret) {
3068 			IWL_WARN(mvm, "set key failed\n");
3069 			/*
3070 			 * can't add key for RX, but we don't need it
3071 			 * in the device for TX so still return 0
3072 			 */
3073 			key->hw_key_idx = STA_KEY_IDX_INVALID;
3074 			ret = 0;
3075 		}
3076 
3077 		break;
3078 	case DISABLE_KEY:
3079 		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3080 			ret = 0;
3081 			break;
3082 		}
3083 
3084 		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3085 		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3086 		break;
3087 	default:
3088 		ret = -EINVAL;
3089 	}
3090 
3091 	mutex_unlock(&mvm->mutex);
3092 	return ret;
3093 }
3094 
3095 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3096 					struct ieee80211_vif *vif,
3097 					struct ieee80211_key_conf *keyconf,
3098 					struct ieee80211_sta *sta,
3099 					u32 iv32, u16 *phase1key)
3100 {
3101 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3102 
3103 	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3104 		return;
3105 
3106 	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3107 }
3108 
3109 
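/*
 * Note: iwl_mvm_rx_aux_roc() below is the notification-wait callback for the
 * HOT_SPOT_CMD response.  It runs in the RX path, records the unique event
 * id assigned by the firmware in te_data->uid and queues te_data on
 * aux_roc_te_list, so iwl_mvm_send_aux_roc_cmd() only has to fire the
 * command and never needs the response buffer itself (see the comment above
 * iwl_init_notification_wait() further down).
 */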
3110 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3111 			       struct iwl_rx_packet *pkt, void *data)
3112 {
3113 	struct iwl_mvm *mvm =
3114 		container_of(notif_wait, struct iwl_mvm, notif_wait);
3115 	struct iwl_hs20_roc_res *resp;
3116 	int resp_len = iwl_rx_packet_payload_len(pkt);
3117 	struct iwl_mvm_time_event_data *te_data = data;
3118 
3119 	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3120 		return true;
3121 
3122 	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3123 		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3124 		return true;
3125 	}
3126 
3127 	resp = (void *)pkt->data;
3128 
3129 	IWL_DEBUG_TE(mvm,
3130 		     "Aux ROC: Received response from ucode: status=%d uid=%d\n",
3131 		     resp->status, resp->event_unique_id);
3132 
3133 	te_data->uid = le32_to_cpu(resp->event_unique_id);
3134 	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3135 		     te_data->uid);
3136 
3137 	spin_lock_bh(&mvm->time_event_lock);
3138 	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3139 	spin_unlock_bh(&mvm->time_event_lock);
3140 
3141 	return true;
3142 }
3143 
3144 #define AUX_ROC_MAX_DELAY_ON_CHANNEL 200
3145 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3146 				    struct ieee80211_channel *channel,
3147 				    struct ieee80211_vif *vif,
3148 				    int duration)
3149 {
3150 	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3151 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3152 	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3153 	static const u16 time_event_response[] = { HOT_SPOT_CMD };
3154 	struct iwl_notification_wait wait_time_event;
3155 	struct iwl_hs20_roc_req aux_roc_req = {
3156 		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3157 		.id_and_color =
3158 			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3159 		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3160 		/* Set the channel info data */
3161 		.channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
3162 			PHY_BAND_24 : PHY_BAND_5,
3163 		.channel_info.channel = channel->hw_value,
3164 		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
3165 		/* Set the time and duration */
3166 		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3167 		.apply_time_max_delay =
3168 			cpu_to_le32(MSEC_TO_TU(AUX_ROC_MAX_DELAY_ON_CHANNEL)),
3169 		.duration = cpu_to_le32(MSEC_TO_TU(duration)),
3170 	};
3171 
3172 	/* Set the node address */
3173 	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3174 
3175 	lockdep_assert_held(&mvm->mutex);
3176 
3177 	spin_lock_bh(&mvm->time_event_lock);
3178 
3179 	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3180 		spin_unlock_bh(&mvm->time_event_lock);
3181 		return -EIO;
3182 	}
3183 
3184 	te_data->vif = vif;
3185 	te_data->duration = duration;
3186 	te_data->id = HOT_SPOT_CMD;
3187 
3188 	spin_unlock_bh(&mvm->time_event_lock);
3189 
3190 	/*
3191 	 * Use a notification wait, which really just processes the
3192 	 * command response and doesn't wait for anything, in order
3193 	 * to be able to process the response and get the UID inside
3194 	 * the RX path. Using CMD_WANT_SKB doesn't work because it
3195 	 * stores the buffer and then wakes up this thread, by which
3196 	 * time another notification (that the time event started)
3197 	 * might already have been processed unsuccessfully.
3198 	 */
3199 	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3200 				   time_event_response,
3201 				   ARRAY_SIZE(time_event_response),
3202 				   iwl_mvm_rx_aux_roc, te_data);
3203 
3204 	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3205 				   &aux_roc_req);
3206 
3207 	if (res) {
3208 		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3209 		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3210 		goto out_clear_te;
3211 	}
3212 
3213 	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
3214 	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3215 	/* should never fail */
3216 	WARN_ON_ONCE(res);
3217 
3218 	if (res) {
3219  out_clear_te:
3220 		spin_lock_bh(&mvm->time_event_lock);
3221 		iwl_mvm_te_clear_data(mvm, te_data);
3222 		spin_unlock_bh(&mvm->time_event_lock);
3223 	}
3224 
3225 	return res;
3226 }
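
/*
 * Illustrative sketch (not part of the driver): the duration and max-delay
 * fields in the aux ROC command above go through MSEC_TO_TU(), which is
 * assumed to convert milliseconds into 802.11 time units (1 TU = 1024 usec).
 * A stand-alone, plain-C version of that conversion:
 */
static inline unsigned int example_msec_to_tu(unsigned int msec)
{
	/* msec * 1000 usec-per-msec, divided by 1024 usec-per-TU */
	return (msec * 1000) / 1024;
}
/* e.g. example_msec_to_tu(AUX_ROC_MAX_DELAY_ON_CHANNEL) == 195 TU */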
3227 
3228 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3229 		       struct ieee80211_vif *vif,
3230 		       struct ieee80211_channel *channel,
3231 		       int duration,
3232 		       enum ieee80211_roc_type type)
3233 {
3234 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3235 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3236 	struct cfg80211_chan_def chandef;
3237 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3238 	int ret, i;
3239 
3240 	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3241 			   duration, type);
3242 
3243 	flush_work(&mvm->roc_done_wk);
3244 
3245 	mutex_lock(&mvm->mutex);
3246 
3247 	switch (vif->type) {
3248 	case NL80211_IFTYPE_STATION:
3249 		if (fw_has_capa(&mvm->fw->ucode_capa,
3250 				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3251 			/* Use aux roc framework (HS20) */
3252 			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3253 						       vif, duration);
3254 			goto out_unlock;
3255 		}
3256 		IWL_ERR(mvm, "hotspot not supported\n");
3257 		ret = -EINVAL;
3258 		goto out_unlock;
3259 	case NL80211_IFTYPE_P2P_DEVICE:
3260 		/* handle below */
3261 		break;
3262 	default:
3263 		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3264 		ret = -EINVAL;
3265 		goto out_unlock;
3266 	}
3267 
3268 	for (i = 0; i < NUM_PHY_CTX; i++) {
3269 		phy_ctxt = &mvm->phy_ctxts[i];
3270 		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3271 			continue;
3272 
3273 		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3274 			/*
3275 			 * Unbind the P2P_DEVICE from the current PHY context,
3276 			 * and if the PHY context is not used remove it.
3277 			 */
3278 			ret = iwl_mvm_binding_remove_vif(mvm, vif);
3279 			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3280 				goto out_unlock;
3281 
3282 			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3283 
3284 			/* Bind the P2P_DEVICE to the current PHY Context */
3285 			mvmvif->phy_ctxt = phy_ctxt;
3286 
3287 			ret = iwl_mvm_binding_add_vif(mvm, vif);
3288 			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3289 				goto out_unlock;
3290 
3291 			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3292 			goto schedule_time_event;
3293 		}
3294 	}
3295 
3296 	/* Need to update the PHY context only if the ROC channel changed */
3297 	if (channel == mvmvif->phy_ctxt->channel)
3298 		goto schedule_time_event;
3299 
3300 	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3301 
3302 	/*
3303 	 * Change the PHY context configuration as it is currently referenced
3304 	 * only by the P2P Device MAC
3305 	 */
3306 	if (mvmvif->phy_ctxt->ref == 1) {
3307 		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3308 					       &chandef, 1, 1);
3309 		if (ret)
3310 			goto out_unlock;
3311 	} else {
3312 		/*
3313 		 * The PHY context is shared with other MACs. Need to remove the
3314 		 * P2P Device from the binding, allocate a new PHY context and
3315 		 * create a new binding
3316 		 */
3317 		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3318 		if (!phy_ctxt) {
3319 			ret = -ENOSPC;
3320 			goto out_unlock;
3321 		}
3322 
3323 		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3324 					       1, 1);
3325 		if (ret) {
3326 			IWL_ERR(mvm, "Failed to change PHY context\n");
3327 			goto out_unlock;
3328 		}
3329 
3330 		/* Unbind the P2P_DEVICE from the current PHY context */
3331 		ret = iwl_mvm_binding_remove_vif(mvm, vif);
3332 		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3333 			goto out_unlock;
3334 
3335 		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3336 
3337 		/* Bind the P2P_DEVICE to the new allocated PHY context */
3338 		mvmvif->phy_ctxt = phy_ctxt;
3339 
3340 		ret = iwl_mvm_binding_add_vif(mvm, vif);
3341 		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3342 			goto out_unlock;
3343 
3344 		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3345 	}
3346 
3347 schedule_time_event:
3348 	/* Schedule the time events */
3349 	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3350 
3351 out_unlock:
3352 	mutex_unlock(&mvm->mutex);
3353 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3354 	return ret;
3355 }
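
/*
 * Illustrative sketch (not part of the driver) of the PHY-context selection
 * policy iwl_mvm_roc() applies above for a P2P_DEVICE ROC request: reuse a
 * context that is already tuned to the requested channel, retune the current
 * one if it is privately owned, otherwise take a free context.  The types
 * and helper below are hypothetical simplifications.
 */
struct example_phy_ctxt {
	int ref;		/* number of MACs bound to this context */
	int channel;		/* hw channel number it is tuned to */
};

static struct example_phy_ctxt *
example_pick_roc_ctxt(struct example_phy_ctxt *ctxts, int n,
		      struct example_phy_ctxt *cur, int channel)
{
	int i;

	/* 1) another in-use context already sits on the wanted channel */
	for (i = 0; i < n; i++)
		if (&ctxts[i] != cur && ctxts[i].ref &&
		    ctxts[i].channel == channel)
			return &ctxts[i];

	/* 2) current context already matches, or is private and retunable */
	if (cur->channel == channel || cur->ref == 1)
		return cur;

	/* 3) otherwise an unreferenced context must be allocated */
	for (i = 0; i < n; i++)
		if (!ctxts[i].ref)
			return &ctxts[i];

	return NULL;		/* maps to -ENOSPC in the real code */
}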
3356 
3357 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3358 {
3359 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3360 
3361 	IWL_DEBUG_MAC80211(mvm, "enter\n");
3362 
3363 	mutex_lock(&mvm->mutex);
3364 	iwl_mvm_stop_roc(mvm);
3365 	mutex_unlock(&mvm->mutex);
3366 
3367 	IWL_DEBUG_MAC80211(mvm, "leave\n");
3368 	return 0;
3369 }
3370 
3371 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3372 				 struct ieee80211_chanctx_conf *ctx)
3373 {
3374 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3375 	struct iwl_mvm_phy_ctxt *phy_ctxt;
3376 	int ret;
3377 
3378 	lockdep_assert_held(&mvm->mutex);
3379 
3380 	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3381 
3382 	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3383 	if (!phy_ctxt) {
3384 		ret = -ENOSPC;
3385 		goto out;
3386 	}
3387 
3388 	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3389 				       ctx->rx_chains_static,
3390 				       ctx->rx_chains_dynamic);
3391 	if (ret) {
3392 		IWL_ERR(mvm, "Failed to add PHY context\n");
3393 		goto out;
3394 	}
3395 
3396 	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3397 	*phy_ctxt_id = phy_ctxt->id;
3398 out:
3399 	return ret;
3400 }
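
/*
 * Illustrative sketch (not part of the driver): mac80211 reserves a small
 * driver-private area in every channel context (ctx->drv_priv); the code
 * above treats it as a u16 that holds the index of the chosen iwl_mvm PHY
 * context.  The stash/lookup pattern in isolation:
 */
static void example_stash_phy_id(void *drv_priv, unsigned short id)
{
	*(unsigned short *)drv_priv = id;	/* store an index, not a pointer */
}

static unsigned short example_fetch_phy_id(const void *drv_priv)
{
	return *(const unsigned short *)drv_priv;	/* later: phy_ctxts[id] */
}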
3401 
3402 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3403 			       struct ieee80211_chanctx_conf *ctx)
3404 {
3405 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3406 	int ret;
3407 
3408 	mutex_lock(&mvm->mutex);
3409 	ret = __iwl_mvm_add_chanctx(mvm, ctx);
3410 	mutex_unlock(&mvm->mutex);
3411 
3412 	return ret;
3413 }
3414 
3415 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3416 				     struct ieee80211_chanctx_conf *ctx)
3417 {
3418 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3419 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3420 
3421 	lockdep_assert_held(&mvm->mutex);
3422 
3423 	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3424 }
3425 
3426 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3427 				   struct ieee80211_chanctx_conf *ctx)
3428 {
3429 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3430 
3431 	mutex_lock(&mvm->mutex);
3432 	__iwl_mvm_remove_chanctx(mvm, ctx);
3433 	mutex_unlock(&mvm->mutex);
3434 }
3435 
3436 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3437 				   struct ieee80211_chanctx_conf *ctx,
3438 				   u32 changed)
3439 {
3440 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3441 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3442 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3443 
3444 	if (WARN_ONCE((phy_ctxt->ref > 1) &&
3445 		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3446 				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3447 				   IEEE80211_CHANCTX_CHANGE_RADAR |
3448 				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3449 		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
3450 		      phy_ctxt->ref, changed))
3451 		return;
3452 
3453 	mutex_lock(&mvm->mutex);
3454 	iwl_mvm_bt_coex_vif_change(mvm);
3455 	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3456 				 ctx->rx_chains_static,
3457 				 ctx->rx_chains_dynamic);
3458 	mutex_unlock(&mvm->mutex);
3459 }
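
/*
 * Illustrative sketch (not part of the driver): the WARN_ONCE() above only
 * lets a *shared* PHY context (ref > 1) be reconfigured for a small set of
 * "safe" change bits.  The flag values below are hypothetical; the check is
 * a plain allowed-mask test:
 */
#define EX_CHANGE_WIDTH		(1U << 0)
#define EX_CHANGE_RX_CHAINS	(1U << 1)
#define EX_CHANGE_RADAR		(1U << 2)
#define EX_CHANGE_MIN_WIDTH	(1U << 3)

static int example_change_allowed(int refcount, unsigned int changed)
{
	unsigned int allowed = EX_CHANGE_WIDTH | EX_CHANGE_RX_CHAINS |
			       EX_CHANGE_RADAR | EX_CHANGE_MIN_WIDTH;

	/* a context used by a single MAC may change anything */
	return refcount <= 1 || !(changed & ~allowed);
}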
3460 
3461 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3462 					struct ieee80211_vif *vif,
3463 					struct ieee80211_chanctx_conf *ctx,
3464 					bool switching_chanctx)
3465 {
3466 	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3467 	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3468 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3469 	int ret;
3470 
3471 	lockdep_assert_held(&mvm->mutex);
3472 
3473 	mvmvif->phy_ctxt = phy_ctxt;
3474 
3475 	switch (vif->type) {
3476 	case NL80211_IFTYPE_AP:
3477 		/* only needed if we're switching chanctx (i.e. during CSA) */
3478 		if (switching_chanctx) {
3479 			mvmvif->ap_ibss_active = true;
3480 			break;
3481 		}
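		/* no break: the non-CSA AP case falls through, handled like ADHOC */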
3482 	case NL80211_IFTYPE_ADHOC:
3483 		/*
3484 		 * The AP binding flow is handled as part of the start_ap flow
3485 		 * (in bss_info_changed), similarly for IBSS.
3486 		 */
3487 		ret = 0;
3488 		goto out;
3489 	case NL80211_IFTYPE_STATION:
3490 		break;
3491 	case NL80211_IFTYPE_MONITOR:
3492 		/* always disable PS when a monitor interface is active */
3493 		mvmvif->ps_disabled = true;
3494 		break;
3495 	default:
3496 		ret = -EINVAL;
3497 		goto out;
3498 	}
3499 
3500 	ret = iwl_mvm_binding_add_vif(mvm, vif);
3501 	if (ret)
3502 		goto out;
3503 
3504 	/*
3505 	 * Power state must be updated before quotas,
3506 	 * otherwise fw will complain.
3507 	 */
3508 	iwl_mvm_power_update_mac(mvm);
3509 
3510 	/* Setting the quota at this stage is only required for monitor
3511 	 * interfaces. For the other types, the bss_info changed flow
3512 	 * will handle quota settings.
3513 	 */
3514 	if (vif->type == NL80211_IFTYPE_MONITOR) {
3515 		mvmvif->monitor_active = true;
3516 		ret = iwl_mvm_update_quotas(mvm, false, NULL);
3517 		if (ret)
3518 			goto out_remove_binding;
3519 	}
3520 
3521 	/* Handle binding during CSA */
3522 	if (vif->type == NL80211_IFTYPE_AP) {
3523 		iwl_mvm_update_quotas(mvm, false, NULL);
3524 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3525 	}
3526 
3527 	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3528 		u32 duration = 2 * vif->bss_conf.beacon_int;
3529 
3530 		/* iwl_mvm_protect_session() reads directly from the
3531 		 * device (the system time), so make sure it is
3532 		 * available.
3533 		 */
3534 		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3535 		if (ret)
3536 			goto out_remove_binding;
3537 
3538 		/* Protect the session to make sure we hear the first
3539 		 * beacon on the new channel.
3540 		 */
3541 		iwl_mvm_protect_session(mvm, vif, duration, duration,
3542 					vif->bss_conf.beacon_int / 2,
3543 					true);
3544 
3545 		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3546 
3547 		iwl_mvm_update_quotas(mvm, false, NULL);
3548 	}
3549 
3550 	goto out;
3551 
3552 out_remove_binding:
3553 	iwl_mvm_binding_remove_vif(mvm, vif);
3554 	iwl_mvm_power_update_mac(mvm);
3555 out:
3556 	if (ret)
3557 		mvmvif->phy_ctxt = NULL;
3558 	return ret;
3559 }
3560 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3561 				      struct ieee80211_vif *vif,
3562 				      struct ieee80211_chanctx_conf *ctx)
3563 {
3564 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3565 	int ret;
3566 
3567 	mutex_lock(&mvm->mutex);
3568 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3569 	mutex_unlock(&mvm->mutex);
3570 
3571 	return ret;
3572 }
3573 
3574 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3575 					   struct ieee80211_vif *vif,
3576 					   struct ieee80211_chanctx_conf *ctx,
3577 					   bool switching_chanctx)
3578 {
3579 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3580 	struct ieee80211_vif *disabled_vif = NULL;
3581 
3582 	lockdep_assert_held(&mvm->mutex);
3583 
3584 	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3585 
3586 	switch (vif->type) {
3587 	case NL80211_IFTYPE_ADHOC:
3588 		goto out;
3589 	case NL80211_IFTYPE_MONITOR:
3590 		mvmvif->monitor_active = false;
3591 		mvmvif->ps_disabled = false;
3592 		break;
3593 	case NL80211_IFTYPE_AP:
3594 		/* This part is triggered only during CSA */
3595 		if (!switching_chanctx || !mvmvif->ap_ibss_active)
3596 			goto out;
3597 
3598 		mvmvif->csa_countdown = false;
3599 
3600 		/* Set CS bit on all the stations */
3601 		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3602 
3603 		/* Save blocked iface, the timeout is set on the next beacon */
3604 		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3605 
3606 		mvmvif->ap_ibss_active = false;
3607 		break;
3608 	case NL80211_IFTYPE_STATION:
3609 		if (!switching_chanctx)
3610 			break;
3611 
3612 		disabled_vif = vif;
3613 
3614 		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3615 		break;
3616 	default:
3617 		break;
3618 	}
3619 
3620 	iwl_mvm_update_quotas(mvm, false, disabled_vif);
3621 	iwl_mvm_binding_remove_vif(mvm, vif);
3622 
3623 out:
3624 	mvmvif->phy_ctxt = NULL;
3625 	iwl_mvm_power_update_mac(mvm);
3626 }
3627 
3628 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3629 					 struct ieee80211_vif *vif,
3630 					 struct ieee80211_chanctx_conf *ctx)
3631 {
3632 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3633 
3634 	mutex_lock(&mvm->mutex);
3635 	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3636 	mutex_unlock(&mvm->mutex);
3637 }
3638 
3639 static int
3640 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3641 				struct ieee80211_vif_chanctx_switch *vifs)
3642 {
3643 	int ret;
3644 
3645 	mutex_lock(&mvm->mutex);
3646 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3647 	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3648 
3649 	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3650 	if (ret) {
3651 		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3652 		goto out_reassign;
3653 	}
3654 
3655 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3656 					   true);
3657 	if (ret) {
3658 		IWL_ERR(mvm,
3659 			"failed to assign new_ctx during channel switch\n");
3660 		goto out_remove;
3661 	}
3662 
3663 	/* we don't support TDLS during DCM - can be caused by channel switch */
3664 	if (iwl_mvm_phy_ctx_count(mvm) > 1)
3665 		iwl_mvm_teardown_tdls_peers(mvm);
3666 
3667 	goto out;
3668 
3669 out_remove:
3670 	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3671 
3672 out_reassign:
3673 	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3674 		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3675 		goto out_restart;
3676 	}
3677 
3678 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3679 					 true)) {
3680 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3681 		goto out_restart;
3682 	}
3683 
3684 	goto out;
3685 
3686 out_restart:
3687 	/* things keep failing, better restart the hw */
3688 	iwl_mvm_nic_restart(mvm, false);
3689 
3690 out:
3691 	mutex_unlock(&mvm->mutex);
3692 
3693 	return ret;
3694 }
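
/*
 * Illustrative sketch (not part of the driver): the swap path above uses the
 * kernel's goto-ladder idiom to unwind on failure - drop the half-added new
 * context, try to restore the old one, and only restart the hardware if even
 * that fails.  The control flow, condensed with hypothetical callbacks:
 */
static int example_swap_with_rollback(int (*add_new)(void),
				      int (*assign_new)(void),
				      void (*remove_new)(void),
				      int (*restore_old)(void),
				      void (*restart_hw)(void))
{
	int ret;

	ret = add_new();
	if (ret)
		goto reassign_old;

	ret = assign_new();
	if (!ret)
		return 0;			/* success */

	remove_new();				/* undo the half-finished add */

reassign_old:
	if (restore_old())			/* try to put the old ctx back */
		restart_hw();			/* last resort: restart the NIC */

	return ret;
}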
3695 
3696 static int
3697 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3698 				    struct ieee80211_vif_chanctx_switch *vifs)
3699 {
3700 	int ret;
3701 
3702 	mutex_lock(&mvm->mutex);
3703 	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3704 
3705 	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3706 					   true);
3707 	if (ret) {
3708 		IWL_ERR(mvm,
3709 			"failed to assign new_ctx during channel switch\n");
3710 		goto out_reassign;
3711 	}
3712 
3713 	goto out;
3714 
3715 out_reassign:
3716 	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3717 					 true)) {
3718 		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3719 		goto out_restart;
3720 	}
3721 
3722 	goto out;
3723 
3724 out_restart:
3725 	/* things keep failing, better restart the hw */
3726 	iwl_mvm_nic_restart(mvm, false);
3727 
3728 out:
3729 	mutex_unlock(&mvm->mutex);
3730 
3731 	return ret;
3732 }
3733 
3734 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
3735 				      struct ieee80211_vif_chanctx_switch *vifs,
3736 				      int n_vifs,
3737 				      enum ieee80211_chanctx_switch_mode mode)
3738 {
3739 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3740 	int ret;
3741 
3742 	/* we only support a single vif right now */
3743 	if (n_vifs > 1)
3744 		return -EOPNOTSUPP;
3745 
3746 	switch (mode) {
3747 	case CHANCTX_SWMODE_SWAP_CONTEXTS:
3748 		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
3749 		break;
3750 	case CHANCTX_SWMODE_REASSIGN_VIF:
3751 		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
3752 		break;
3753 	default:
3754 		ret = -EOPNOTSUPP;
3755 		break;
3756 	}
3757 
3758 	return ret;
3759 }
3760 
3761 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
3762 			   struct ieee80211_sta *sta,
3763 			   bool set)
3764 {
3765 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3766 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3767 
3768 	if (!mvm_sta || !mvm_sta->vif) {
3769 		IWL_ERR(mvm, "Station is not associated to a vif\n");
3770 		return -EINVAL;
3771 	}
3772 
3773 	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
3774 }
3775 
3776 #ifdef CONFIG_NL80211_TESTMODE
3777 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
3778 	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
3779 	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
3780 	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
3781 };
3782 
3783 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
3784 				      struct ieee80211_vif *vif,
3785 				      void *data, int len)
3786 {
3787 	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
3788 	int err;
3789 	u32 noa_duration;
3790 
3791 	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
3792 	if (err)
3793 		return err;
3794 
3795 	if (!tb[IWL_MVM_TM_ATTR_CMD])
3796 		return -EINVAL;
3797 
3798 	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
3799 	case IWL_MVM_TM_CMD_SET_NOA:
3800 		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
3801 		    !vif->bss_conf.enable_beacon ||
3802 		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
3803 			return -EINVAL;
3804 
3805 		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
3806 		if (noa_duration >= vif->bss_conf.beacon_int)
3807 			return -EINVAL;
3808 
3809 		mvm->noa_duration = noa_duration;
3810 		mvm->noa_vif = vif;
3811 
3812 		return iwl_mvm_update_quotas(mvm, false, NULL);
3813 	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
3814 		/* must be associated client vif - ignore authorized */
3815 		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
3816 		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
3817 		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
3818 			return -EINVAL;
3819 
3820 		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
3821 			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3822 		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3823 	}
3824 
3825 	return -EOPNOTSUPP;
3826 }
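
/*
 * Illustrative sketch (not part of the driver): the SET_NOA test command
 * above only accepts a notice-of-absence duration strictly shorter than the
 * beacon interval, since the absence window has to fit inside one beacon
 * period.  The check in isolation:
 */
static int example_noa_duration_valid(unsigned int noa_duration_tu,
				      unsigned int beacon_int_tu)
{
	return noa_duration_tu < beacon_int_tu;
}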
3827 
3828 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
3829 				    struct ieee80211_vif *vif,
3830 				    void *data, int len)
3831 {
3832 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3833 	int err;
3834 
3835 	mutex_lock(&mvm->mutex);
3836 	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
3837 	mutex_unlock(&mvm->mutex);
3838 
3839 	return err;
3840 }
3841 #endif
3842 
3843 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
3844 				   struct ieee80211_vif *vif,
3845 				   struct ieee80211_channel_switch *chsw)
3846 {
3847 	/* By implementing this operation, we prevent mac80211 from
3848 	 * starting its own channel switch timer, so that we can call
3849 	 * ieee80211_chswitch_done() ourselves at the right time
3850 	 * (which is when the absence time event starts).
3851 	 */
3852 
3853 	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
3854 			   "dummy channel switch op\n");
3855 }
3856 
3857 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
3858 				      struct ieee80211_vif *vif,
3859 				      struct ieee80211_channel_switch *chsw)
3860 {
3861 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3862 	struct ieee80211_vif *csa_vif;
3863 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3864 	u32 apply_time;
3865 	int ret;
3866 
3867 	mutex_lock(&mvm->mutex);
3868 
3869 	mvmvif->csa_failed = false;
3870 
3871 	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
3872 			   chsw->chandef.center_freq1);
3873 
3874 	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);
3875 
3876 	switch (vif->type) {
3877 	case NL80211_IFTYPE_AP:
3878 		csa_vif =
3879 			rcu_dereference_protected(mvm->csa_vif,
3880 						  lockdep_is_held(&mvm->mutex));
3881 		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
3882 			      "Another CSA is already in progress")) {
3883 			ret = -EBUSY;
3884 			goto out_unlock;
3885 		}
3886 
3887 		rcu_assign_pointer(mvm->csa_vif, vif);
3888 
3889 		if (WARN_ONCE(mvmvif->csa_countdown,
3890 			      "Previous CSA countdown didn't complete")) {
3891 			ret = -EBUSY;
3892 			goto out_unlock;
3893 		}
3894 
3895 		break;
3896 	case NL80211_IFTYPE_STATION:
3897 		/* Schedule the time event to a bit before beacon 1,
3898 		 * to make sure we're in the new channel when the
3899 		 * GO/AP arrives.
3900 		 */
3901 		apply_time = chsw->device_timestamp +
3902 			((vif->bss_conf.beacon_int * (chsw->count - 1) -
3903 			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
3904 
3905 		if (chsw->block_tx)
3906 			iwl_mvm_csa_client_absent(mvm, vif);
3907 
3908 		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
3909 					    apply_time);
3910 		if (mvmvif->bf_data.bf_enabled) {
3911 			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
3912 			if (ret)
3913 				goto out_unlock;
3914 		}
3915 
3916 		break;
3917 	default:
3918 		break;
3919 	}
3920 
3921 	mvmvif->ps_disabled = true;
3922 
3923 	ret = iwl_mvm_power_update_ps(mvm);
3924 	if (ret)
3925 		goto out_unlock;
3926 
3927 	/* we won't be on this channel any longer */
3928 	iwl_mvm_teardown_tdls_peers(mvm);
3929 
3930 out_unlock:
3931 	mutex_unlock(&mvm->mutex);
3932 
3933 	return ret;
3934 }
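
/*
 * Illustrative sketch (not part of the driver): the station-side apply time
 * computed above lands the absence a little before the first beacon on the
 * new channel.  The beacon interval and the safety margin appear to be in
 * TUs while the device timestamp is in usec, hence the "* 1024" (1 TU =
 * 1024 usec).  Names below are hypothetical:
 */
static unsigned int example_csa_apply_time(unsigned int device_ts_usec,
					   unsigned int beacon_int_tu,
					   unsigned int csa_count,
					   unsigned int margin_tu)
{
	/* (csa_count - 1) beacon intervals ahead, minus a small margin */
	return device_ts_usec +
	       (beacon_int_tu * (csa_count - 1) - margin_tu) * 1024;
}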
3935 
3936 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
3937 				       struct ieee80211_vif *vif)
3938 {
3939 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3940 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3941 	int ret;
3942 
3943 	mutex_lock(&mvm->mutex);
3944 
3945 	if (mvmvif->csa_failed) {
3946 		mvmvif->csa_failed = false;
3947 		ret = -EIO;
3948 		goto out_unlock;
3949 	}
3950 
3951 	if (vif->type == NL80211_IFTYPE_STATION) {
3952 		struct iwl_mvm_sta *mvmsta;
3953 
3954 		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
3955 							  mvmvif->ap_sta_id);
3956 
3957 		if (WARN_ON(!mvmsta)) {
3958 			ret = -EIO;
3959 			goto out_unlock;
3960 		}
3961 
3962 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
3963 
3964 		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3965 
3966 		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
3967 		if (ret)
3968 			goto out_unlock;
3969 
3970 		iwl_mvm_stop_session_protection(mvm, vif);
3971 	}
3972 
3973 	mvmvif->ps_disabled = false;
3974 
3975 	ret = iwl_mvm_power_update_ps(mvm);
3976 
3977 out_unlock:
3978 	mutex_unlock(&mvm->mutex);
3979 
3980 	return ret;
3981 }
3982 
3983 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
3984 			      struct ieee80211_vif *vif, u32 queues, bool drop)
3985 {
3986 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3987 	struct iwl_mvm_vif *mvmvif;
3988 	struct iwl_mvm_sta *mvmsta;
3989 	struct ieee80211_sta *sta;
3990 	int i;
3991 	u32 msk = 0;
3992 
3993 	if (!vif || vif->type != NL80211_IFTYPE_STATION)
3994 		return;
3995 
3996 	mutex_lock(&mvm->mutex);
3997 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
3998 
3999 	/* flush the AP-station and all TDLS peers */
4000 	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
4001 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
4002 						lockdep_is_held(&mvm->mutex));
4003 		if (IS_ERR_OR_NULL(sta))
4004 			continue;
4005 
4006 		mvmsta = iwl_mvm_sta_from_mac80211(sta);
4007 		if (mvmsta->vif != vif)
4008 			continue;
4009 
4010 		/* make sure only TDLS peers or the AP are flushed */
4011 		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
4012 
4013 		msk |= mvmsta->tfd_queue_msk;
4014 	}
4015 
4016 	if (drop) {
4017 		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
4018 			IWL_ERR(mvm, "flush request fail\n");
4019 		mutex_unlock(&mvm->mutex);
4020 	} else {
4021 		mutex_unlock(&mvm->mutex);
4022 
4023 		/* this can take a while, and we may need/want other operations
4024 		 * to succeed while doing this, so do it without the mutex held
4025 		 */
4026 		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
4027 	}
4028 }
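
/*
 * Illustrative sketch (not part of the driver): the flush above walks the
 * station table, OR-accumulates the TFD queue mask of every station that
 * belongs to the vif (the AP station and TDLS peers) and then flushes or
 * waits on the union in one shot.  The accumulation pattern with
 * hypothetical types:
 */
struct example_sta {
	int belongs_to_vif;		/* simplified ownership test */
	unsigned int tfd_queue_msk;	/* one bit per hardware TX queue */
};

static unsigned int example_build_flush_mask(const struct example_sta *stas,
					     int n_stas)
{
	unsigned int msk = 0;
	int i;

	for (i = 0; i < n_stas; i++)
		if (stas[i].belongs_to_vif)
			msk |= stas[i].tfd_queue_msk;

	return msk;
}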
4029 
4030 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
4031 				  struct survey_info *survey)
4032 {
4033 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4034 	int ret;
4035 
4036 	memset(survey, 0, sizeof(*survey));
4037 
4038 	/* only support global statistics right now */
4039 	if (idx != 0)
4040 		return -ENOENT;
4041 
4042 	if (!fw_has_capa(&mvm->fw->ucode_capa,
4043 			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4044 		return -ENOENT;
4045 
4046 	mutex_lock(&mvm->mutex);
4047 
4048 	if (mvm->ucode_loaded) {
4049 		ret = iwl_mvm_request_statistics(mvm, false);
4050 		if (ret)
4051 			goto out;
4052 	}
4053 
4054 	survey->filled = SURVEY_INFO_TIME |
4055 			 SURVEY_INFO_TIME_RX |
4056 			 SURVEY_INFO_TIME_TX |
4057 			 SURVEY_INFO_TIME_SCAN;
4058 	survey->time = mvm->accu_radio_stats.on_time_rf +
4059 		       mvm->radio_stats.on_time_rf;
4060 	do_div(survey->time, USEC_PER_MSEC);
4061 
4062 	survey->time_rx = mvm->accu_radio_stats.rx_time +
4063 			  mvm->radio_stats.rx_time;
4064 	do_div(survey->time_rx, USEC_PER_MSEC);
4065 
4066 	survey->time_tx = mvm->accu_radio_stats.tx_time +
4067 			  mvm->radio_stats.tx_time;
4068 	do_div(survey->time_tx, USEC_PER_MSEC);
4069 
4070 	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4071 			    mvm->radio_stats.on_time_scan;
4072 	do_div(survey->time_scan, USEC_PER_MSEC);
4073 
4074 	ret = 0;
4075  out:
4076 	mutex_unlock(&mvm->mutex);
4077 	return ret;
4078 }
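
/*
 * Illustrative sketch (not part of the driver): each survey field above is
 * the sum of the accumulated (pre-restart) radio statistics and the current
 * ones, converted from microseconds to milliseconds (do_div() is the
 * kernel's in-place 64-bit division).  Plain-C equivalent:
 */
static unsigned long long example_usec_to_msec(unsigned long long accumulated,
					       unsigned long long current_usec)
{
	return (accumulated + current_usec) / 1000;	/* USEC_PER_MSEC */
}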
4079 
4080 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4081 				       struct ieee80211_vif *vif,
4082 				       struct ieee80211_sta *sta,
4083 				       struct station_info *sinfo)
4084 {
4085 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4086 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4087 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4088 
4089 	/* if beacon filtering isn't on, mac80211 does it anyway */
4090 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4091 		return;
4092 
4093 	if (!vif->bss_conf.assoc)
4094 		return;
4095 
4096 	mutex_lock(&mvm->mutex);
4097 
4098 	if (mvmvif->ap_sta_id != mvmsta->sta_id)
4099 		goto unlock;
4100 
4101 	if (iwl_mvm_request_statistics(mvm, false))
4102 		goto unlock;
4103 
4104 	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4105 			   mvmvif->beacon_stats.accu_num_beacons;
4106 	sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
4107 	if (mvmvif->beacon_stats.avg_signal) {
4108 		/* firmware only reports a value after RXing a few beacons */
4109 		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4110 		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4111 	}
4112  unlock:
4113 	mutex_unlock(&mvm->mutex);
4114 }
4115 
4116 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4117 					struct ieee80211_vif *vif,
4118 					const struct ieee80211_event *event)
4119 {
4120 #define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)	\
4121 	do {							\
4122 		if ((_cnt) && --(_cnt))				\
4123 			break;					\
4124 		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);\
4125 	} while (0)
4126 
4127 	struct iwl_fw_dbg_trigger_tlv *trig;
4128 	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4129 
4130 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4131 		return;
4132 
4133 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4134 	trig_mlme = (void *)trig->data;
4135 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4136 		return;
4137 
4138 	if (event->u.mlme.data == ASSOC_EVENT) {
4139 		if (event->u.mlme.status == MLME_DENIED)
4140 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4141 					   trig_mlme->stop_assoc_denied,
4142 					   "DENIED ASSOC: reason %d",
4143 					    event->u.mlme.reason);
4144 		else if (event->u.mlme.status == MLME_TIMEOUT)
4145 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4146 					   trig_mlme->stop_assoc_timeout,
4147 					   "ASSOC TIMEOUT");
4148 	} else if (event->u.mlme.data == AUTH_EVENT) {
4149 		if (event->u.mlme.status == MLME_DENIED)
4150 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4151 					   trig_mlme->stop_auth_denied,
4152 					   "DENIED AUTH: reason %d",
4153 					   event->u.mlme.reason);
4154 		else if (event->u.mlme.status == MLME_TIMEOUT)
4155 			CHECK_MLME_TRIGGER(mvm, trig, buf,
4156 					   trig_mlme->stop_auth_timeout,
4157 					   "AUTH TIMEOUT");
4158 	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4159 		CHECK_MLME_TRIGGER(mvm, trig, buf,
4160 				   trig_mlme->stop_rx_deauth,
4161 				   "DEAUTH RX %d", event->u.mlme.reason);
4162 	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4163 		CHECK_MLME_TRIGGER(mvm, trig, buf,
4164 				   trig_mlme->stop_tx_deauth,
4165 				   "DEAUTH TX %d", event->u.mlme.reason);
4166 	}
4167 #undef CHECK_MLME_TRIGGER
4168 }
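
/*
 * Illustrative sketch (not part of the driver): CHECK_MLME_TRIGGER() above
 * fires the firmware debug collection either on every event (counter == 0)
 * or only once the per-event stop counter counts down to zero.  The same
 * decision as a plain function (the counter is decremented in place):
 */
static int example_should_collect(unsigned int *countdown)
{
	if (*countdown && --(*countdown))
		return 0;	/* still counting down, don't collect yet */
	return 1;		/* counter was 0 (always fire) or just hit 0 */
}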
4169 
4170 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4171 					  struct ieee80211_vif *vif,
4172 					  const struct ieee80211_event *event)
4173 {
4174 	struct iwl_fw_dbg_trigger_tlv *trig;
4175 	struct iwl_fw_dbg_trigger_ba *ba_trig;
4176 
4177 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4178 		return;
4179 
4180 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4181 	ba_trig = (void *)trig->data;
4182 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4183 		return;
4184 
4185 	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4186 		return;
4187 
4188 	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4189 				    "BAR received from %pM, tid %d, ssn %d",
4190 				    event->u.ba.sta->addr, event->u.ba.tid,
4191 				    event->u.ba.ssn);
4192 }
4193 
4194 static void
4195 iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
4196 				     struct ieee80211_vif *vif,
4197 				     const struct ieee80211_event *event)
4198 {
4199 	struct iwl_fw_dbg_trigger_tlv *trig;
4200 	struct iwl_fw_dbg_trigger_ba *ba_trig;
4201 
4202 	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4203 		return;
4204 
4205 	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4206 	ba_trig = (void *)trig->data;
4207 	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
4208 		return;
4209 
4210 	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
4211 		return;
4212 
4213 	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
4214 				    "Frame from %pM timed out, tid %d",
4215 				    event->u.ba.sta->addr, event->u.ba.tid);
4216 }
4217 
4218 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4219 				       struct ieee80211_vif *vif,
4220 				       const struct ieee80211_event *event)
4221 {
4222 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4223 
4224 	switch (event->type) {
4225 	case MLME_EVENT:
4226 		iwl_mvm_event_mlme_callback(mvm, vif, event);
4227 		break;
4228 	case BAR_RX_EVENT:
4229 		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4230 		break;
4231 	case BA_FRAME_TIMEOUT:
4232 		iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
4233 		break;
4234 	default:
4235 		break;
4236 	}
4237 }
4238 
4239 const struct ieee80211_ops iwl_mvm_hw_ops = {
4240 	.tx = iwl_mvm_mac_tx,
4241 	.ampdu_action = iwl_mvm_mac_ampdu_action,
4242 	.start = iwl_mvm_mac_start,
4243 	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
4244 	.stop = iwl_mvm_mac_stop,
4245 	.add_interface = iwl_mvm_mac_add_interface,
4246 	.remove_interface = iwl_mvm_mac_remove_interface,
4247 	.config = iwl_mvm_mac_config,
4248 	.prepare_multicast = iwl_mvm_prepare_multicast,
4249 	.configure_filter = iwl_mvm_configure_filter,
4250 	.config_iface_filter = iwl_mvm_config_iface_filter,
4251 	.bss_info_changed = iwl_mvm_bss_info_changed,
4252 	.hw_scan = iwl_mvm_mac_hw_scan,
4253 	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4254 	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4255 	.sta_state = iwl_mvm_mac_sta_state,
4256 	.sta_notify = iwl_mvm_mac_sta_notify,
4257 	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4258 	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4259 	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4260 	.sta_rc_update = iwl_mvm_sta_rc_update,
4261 	.conf_tx = iwl_mvm_mac_conf_tx,
4262 	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4263 	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4264 	.flush = iwl_mvm_mac_flush,
4265 	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
4266 	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4267 	.set_key = iwl_mvm_mac_set_key,
4268 	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
4269 	.remain_on_channel = iwl_mvm_roc,
4270 	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
4271 	.add_chanctx = iwl_mvm_add_chanctx,
4272 	.remove_chanctx = iwl_mvm_remove_chanctx,
4273 	.change_chanctx = iwl_mvm_change_chanctx,
4274 	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4275 	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4276 	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4277 
4278 	.start_ap = iwl_mvm_start_ap_ibss,
4279 	.stop_ap = iwl_mvm_stop_ap_ibss,
4280 	.join_ibss = iwl_mvm_start_ap_ibss,
4281 	.leave_ibss = iwl_mvm_stop_ap_ibss,
4282 
4283 	.set_tim = iwl_mvm_set_tim,
4284 
4285 	.channel_switch = iwl_mvm_channel_switch,
4286 	.pre_channel_switch = iwl_mvm_pre_channel_switch,
4287 	.post_channel_switch = iwl_mvm_post_channel_switch,
4288 
4289 	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4290 	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4291 	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4292 
4293 	.event_callback = iwl_mvm_mac_event_callback,
4294 
4295 	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4296 
4297 #ifdef CONFIG_PM_SLEEP
4298 	/* look at d3.c */
4299 	.suspend = iwl_mvm_suspend,
4300 	.resume = iwl_mvm_resume,
4301 	.set_wakeup = iwl_mvm_set_wakeup,
4302 	.set_rekey_data = iwl_mvm_set_rekey_data,
4303 #if IS_ENABLED(CONFIG_IPV6)
4304 	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4305 #endif
4306 	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4307 #endif
4308 	.get_survey = iwl_mvm_mac_get_survey,
4309 	.sta_statistics = iwl_mvm_mac_sta_statistics,
4310 };
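
/*
 * Illustrative sketch (not part of the driver): an ops table like
 * iwl_mvm_hw_ops is what a mac80211 driver passes to ieee80211_alloc_hw()
 * when creating its ieee80211_hw; iwlmvm does this in its own probe path,
 * with considerably more setup than shown here.  Hypothetical helper,
 * kernel build context assumed:
 */
static struct ieee80211_hw *example_alloc_and_register(size_t priv_size)
{
	struct ieee80211_hw *hw = ieee80211_alloc_hw(priv_size,
						     &iwl_mvm_hw_ops);

	if (hw && ieee80211_register_hw(hw)) {
		ieee80211_free_hw(hw);
		hw = NULL;
	}
	return hw;
}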
4311