1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
24 * USA
25 *
26 * The full GNU General Public License is included in this distribution
27 * in the file called COPYING.
28 *
29 * Contact Information:
30 * Intel Linux Wireless <linuxwifi@intel.com>
31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
32 *
33 * BSD LICENSE
34 *
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
37 * All rights reserved.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 *
43 * * Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * * Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in
47 * the documentation and/or other materials provided with the
48 * distribution.
49 * * Neither the name Intel Corporation nor the names of its
50 * contributors may be used to endorse or promote products derived
51 * from this software without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64 *
65 *****************************************************************************/
66 #include <linux/kernel.h>
67 #include <linux/slab.h>
68 #include <linux/skbuff.h>
69 #include <linux/netdevice.h>
70 #include <linux/etherdevice.h>
71 #include <linux/ip.h>
72 #include <linux/if_arp.h>
73 #include <linux/time.h>
74 #include <net/mac80211.h>
75 #include <net/ieee80211_radiotap.h>
76 #include <net/tcp.h>
77
78 #include "iwl-op-mode.h"
79 #include "iwl-io.h"
80 #include "mvm.h"
81 #include "sta.h"
82 #include "time-event.h"
83 #include "iwl-eeprom-parse.h"
84 #include "iwl-phy-db.h"
85 #include "testmode.h"
86 #include "iwl-fw-error-dump.h"
87 #include "iwl-prph.h"
88 #include "iwl-nvm-parse.h"
89 #include "fw-dbg.h"
90
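/*
 * Interface limits/combinations advertised to mac80211: at most one
 * station, one AP/P2P-client/P2P-GO and one P2P_DEVICE interface,
 * up to three interfaces total on up to two different channels.
 */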
91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
92 {
93 .max = 1,
94 .types = BIT(NL80211_IFTYPE_STATION),
95 },
96 {
97 .max = 1,
98 .types = BIT(NL80211_IFTYPE_AP) |
99 BIT(NL80211_IFTYPE_P2P_CLIENT) |
100 BIT(NL80211_IFTYPE_P2P_GO),
101 },
102 {
103 .max = 1,
104 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
105 },
106 };
107
108 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
109 {
110 .num_different_channels = 2,
111 .max_interfaces = 3,
112 .limits = iwl_mvm_limits,
113 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
114 },
115 };
116
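/*
 * WoWLAN-over-TCP ("TCP wakeup") capabilities; hooked into
 * mvm->wowlan.tcp below when WoWLAN is supported.
 */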
117 #ifdef CONFIG_PM_SLEEP
118 static const struct nl80211_wowlan_tcp_data_token_feature
119 iwl_mvm_wowlan_tcp_token_feature = {
120 .min_len = 0,
121 .max_len = 255,
122 .bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
123 };
124
125 static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
126 .tok = &iwl_mvm_wowlan_tcp_token_feature,
127 .data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
128 sizeof(struct ethhdr) -
129 sizeof(struct iphdr) -
130 sizeof(struct tcphdr),
131 .data_interval_max = 65535, /* __le16 in API */
132 .wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
133 sizeof(struct ethhdr) -
134 sizeof(struct iphdr) -
135 sizeof(struct tcphdr),
136 .seq = true,
137 };
138 #endif
139
140 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
141 /*
142 * Use the reserved field to indicate magic values.
143 * These values are only used internally by the driver
144 * and won't make it to the fw (reserved will be 0).
145 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
146 * be the vif's IP address. In case there isn't a single
147 * IP address (0, or more than 1), this attribute will
148 * be skipped.
149 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
150 * the LSB bytes of the vif's MAC address.
151 */
152 enum {
153 BC_FILTER_MAGIC_NONE = 0,
154 BC_FILTER_MAGIC_IP,
155 BC_FILTER_MAGIC_MAC,
156 };
157
158 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
159 {
160 /* arp */
161 .discard = 0,
162 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
163 .attrs = {
164 {
165 /* frame type - arp, hw type - ethernet */
166 .offset_type =
167 BCAST_FILTER_OFFSET_PAYLOAD_START,
168 .offset = sizeof(rfc1042_header),
169 .val = cpu_to_be32(0x08060001),
170 .mask = cpu_to_be32(0xffffffff),
171 },
172 {
173 /* arp dest ip */
174 .offset_type =
175 BCAST_FILTER_OFFSET_PAYLOAD_START,
176 .offset = sizeof(rfc1042_header) + 2 +
177 sizeof(struct arphdr) +
178 ETH_ALEN + sizeof(__be32) +
179 ETH_ALEN,
180 .mask = cpu_to_be32(0xffffffff),
181 /* mark it as special field */
182 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
183 },
184 },
185 },
186 {
187 /* dhcp offer bcast */
188 .discard = 0,
189 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
190 .attrs = {
191 {
192 /* udp dest port - 68 (bootp client)*/
193 .offset_type = BCAST_FILTER_OFFSET_IP_END,
194 .offset = offsetof(struct udphdr, dest),
195 .val = cpu_to_be32(0x00440000),
196 .mask = cpu_to_be32(0xffff0000),
197 },
198 {
199 /* dhcp - lsb bytes of client hw address */
200 .offset_type = BCAST_FILTER_OFFSET_IP_END,
201 .offset = 38,
202 .mask = cpu_to_be32(0xffffffff),
203 /* mark it as special field */
204 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
205 },
206 },
207 },
208 /* last filter must be empty */
209 {},
210 };
211 #endif
212
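/*
 * d0i3 (runtime PM) reference accounting: each iwl_mvm_ref_type has a
 * counter protected by refs_lock, and every driver reference is
 * mirrored by a transport reference so the device cannot enter low
 * power while any reference is held.
 */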
213 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
214 {
215 if (!iwl_mvm_is_d0i3_supported(mvm))
216 return;
217
218 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
219 spin_lock_bh(&mvm->refs_lock);
220 mvm->refs[ref_type]++;
221 spin_unlock_bh(&mvm->refs_lock);
222 iwl_trans_ref(mvm->trans);
223 }
224
225 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
226 {
227 if (!iwl_mvm_is_d0i3_supported(mvm))
228 return;
229
230 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
231 spin_lock_bh(&mvm->refs_lock);
232 if (WARN_ON(!mvm->refs[ref_type])) {
233 spin_unlock_bh(&mvm->refs_lock);
234 return;
235 }
236 mvm->refs[ref_type]--;
237 spin_unlock_bh(&mvm->refs_lock);
238 iwl_trans_unref(mvm->trans);
239 }
240
241 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
242 enum iwl_mvm_ref_type except_ref)
243 {
244 int i, j;
245
246 if (!iwl_mvm_is_d0i3_supported(mvm))
247 return;
248
249 spin_lock_bh(&mvm->refs_lock);
250 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
251 if (except_ref == i || !mvm->refs[i])
252 continue;
253
254 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
255 i, mvm->refs[i]);
256 for (j = 0; j < mvm->refs[i]; j++)
257 iwl_trans_unref(mvm->trans);
258 mvm->refs[i] = 0;
259 }
260 spin_unlock_bh(&mvm->refs_lock);
261 }
262
263 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
264 {
265 int i;
266 bool taken = false;
267
268 if (!iwl_mvm_is_d0i3_supported(mvm))
269 return true;
270
271 spin_lock_bh(&mvm->refs_lock);
272 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
273 if (mvm->refs[i]) {
274 taken = true;
275 break;
276 }
277 }
278 spin_unlock_bh(&mvm->refs_lock);
279
280 return taken;
281 }
282
283 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
284 {
285 iwl_mvm_ref(mvm, ref_type);
286
287 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
288 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
289 HZ)) {
290 WARN_ON_ONCE(1);
291 iwl_mvm_unref(mvm, ref_type);
292 return -EIO;
293 }
294
295 return 0;
296 }
297
298 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
299 {
300 int i;
301
302 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
303 for (i = 0; i < NUM_PHY_CTX; i++) {
304 mvm->phy_ctxts[i].id = i;
305 mvm->phy_ctxts[i].ref = 0;
306 }
307 }
308
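/*
 * Ask the firmware (MCC update) for the regulatory data matching
 * @alpha2 and translate the returned channel list into an
 * ieee80211_regdomain. Must be called with mvm->mutex held; the
 * caller owns (and must free) the returned regdomain.
 */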
309 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
310 const char *alpha2,
311 enum iwl_mcc_source src_id,
312 bool *changed)
313 {
314 struct ieee80211_regdomain *regd = NULL;
315 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
316 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
317 struct iwl_mcc_update_resp *resp;
318
319 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
320
321 lockdep_assert_held(&mvm->mutex);
322
323 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
324 if (IS_ERR_OR_NULL(resp)) {
325 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
326 PTR_ERR_OR_ZERO(resp));
327 goto out;
328 }
329
330 if (changed)
331 *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
332
333 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
334 __le32_to_cpu(resp->n_channels),
335 resp->channels,
336 __le16_to_cpu(resp->mcc));
337 /* Store the return source id */
338 src_id = resp->source_id;
339 kfree(resp);
340 if (IS_ERR_OR_NULL(regd)) {
341 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
342 PTR_ERR_OR_ZERO(regd));
343 goto out;
344 }
345
346 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
347 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
348 mvm->lar_regdom_set = true;
349 mvm->mcc_src = src_id;
350
351 out:
352 return regd;
353 }
354
355 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
356 {
357 bool changed;
358 struct ieee80211_regdomain *regd;
359
360 if (!iwl_mvm_is_lar_supported(mvm))
361 return;
362
363 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
364 if (!IS_ERR_OR_NULL(regd)) {
365 /* only update the regulatory core if changed */
366 if (changed)
367 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
368
369 kfree(regd);
370 }
371 }
372
373 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
374 bool *changed)
375 {
376 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
377 iwl_mvm_is_wifi_mcc_supported(mvm) ?
378 MCC_SOURCE_GET_CURRENT :
379 MCC_SOURCE_OLD_FW, changed);
380 }
381
382 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
383 {
384 enum iwl_mcc_source used_src;
385 struct ieee80211_regdomain *regd;
386 int ret;
387 bool changed;
388 const struct ieee80211_regdomain *r =
389 rtnl_dereference(mvm->hw->wiphy->regd);
390
391 if (!r)
392 return -ENOENT;
393
394 /* save the last source in case we overwrite it below */
395 used_src = mvm->mcc_src;
396 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
397 /* Notify the firmware we support wifi location updates */
398 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
399 if (!IS_ERR_OR_NULL(regd))
400 kfree(regd);
401 }
402
403 /* Now set our last stored MCC and source */
404 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
405 &changed);
406 if (IS_ERR_OR_NULL(regd))
407 return -EIO;
408
409 /* update cfg80211 if the regdomain was changed */
410 if (changed)
411 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
412 else
413 ret = 0;
414
415 kfree(regd);
416 return ret;
417 }
418
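/*
 * Translate the driver/firmware capabilities into mac80211 hw flags,
 * wiphy features and cipher suites, then register the hw with mac80211.
 */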
419 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
420 {
421 struct ieee80211_hw *hw = mvm->hw;
422 int num_mac, ret, i;
423 static const u32 mvm_ciphers[] = {
424 WLAN_CIPHER_SUITE_WEP40,
425 WLAN_CIPHER_SUITE_WEP104,
426 WLAN_CIPHER_SUITE_TKIP,
427 WLAN_CIPHER_SUITE_CCMP,
428 };
429
430 /* Tell mac80211 our characteristics */
431 ieee80211_hw_set(hw, SIGNAL_DBM);
432 ieee80211_hw_set(hw, SPECTRUM_MGMT);
433 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
434 ieee80211_hw_set(hw, QUEUE_CONTROL);
435 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
436 ieee80211_hw_set(hw, SUPPORTS_PS);
437 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
438 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
439 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
440 ieee80211_hw_set(hw, CONNECTION_MONITOR);
441 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
442 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
443 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
444 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
445 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
446 if (iwl_mvm_has_new_rx_api(mvm))
447 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
448
449 if (mvm->trans->num_rx_queues > 1)
450 ieee80211_hw_set(hw, USES_RSS);
451
452 if (mvm->trans->max_skb_frags)
453 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
454
455 if (!iwl_mvm_is_dqa_supported(mvm))
456 hw->queues = mvm->first_agg_queue;
457 else
458 hw->queues = IEEE80211_MAX_QUEUES;
459 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
460 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
461 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
462 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
463 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
464 hw->rate_control_algorithm = "iwl-mvm-rs";
465 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
466 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
467
468 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
469 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
470 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
471 hw->wiphy->cipher_suites = mvm->ciphers;
472
473 if (iwl_mvm_has_new_rx_api(mvm)) {
474 mvm->ciphers[hw->wiphy->n_cipher_suites] =
475 WLAN_CIPHER_SUITE_GCMP;
476 hw->wiphy->n_cipher_suites++;
477 mvm->ciphers[hw->wiphy->n_cipher_suites] =
478 WLAN_CIPHER_SUITE_GCMP_256;
479 hw->wiphy->n_cipher_suites++;
480 }
481
482 /* Enable 11w if software crypto is not enabled (as the
483 * firmware will interpret some mgmt packets, so enabling it
484 * with software crypto isn't safe).
485 */
486 if (!iwlwifi_mod_params.sw_crypto) {
487 ieee80211_hw_set(hw, MFP_CAPABLE);
488 mvm->ciphers[hw->wiphy->n_cipher_suites] =
489 WLAN_CIPHER_SUITE_AES_CMAC;
490 hw->wiphy->n_cipher_suites++;
491 if (iwl_mvm_has_new_rx_api(mvm)) {
492 mvm->ciphers[hw->wiphy->n_cipher_suites] =
493 WLAN_CIPHER_SUITE_BIP_GMAC_128;
494 hw->wiphy->n_cipher_suites++;
495 mvm->ciphers[hw->wiphy->n_cipher_suites] =
496 WLAN_CIPHER_SUITE_BIP_GMAC_256;
497 hw->wiphy->n_cipher_suites++;
498 }
499 }
500
501 /* currently FW API supports only one optional cipher scheme */
502 if (mvm->fw->cs[0].cipher) {
503 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
504 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
505
506 mvm->hw->n_cipher_schemes = 1;
507
508 cs->cipher = le32_to_cpu(fwcs->cipher);
509 cs->iftype = BIT(NL80211_IFTYPE_STATION);
510 cs->hdr_len = fwcs->hdr_len;
511 cs->pn_len = fwcs->pn_len;
512 cs->pn_off = fwcs->pn_off;
513 cs->key_idx_off = fwcs->key_idx_off;
514 cs->key_idx_mask = fwcs->key_idx_mask;
515 cs->key_idx_shift = fwcs->key_idx_shift;
516 cs->mic_len = fwcs->mic_len;
517
518 mvm->hw->cipher_schemes = mvm->cs;
519 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
520 hw->wiphy->n_cipher_suites++;
521 }
522
523 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
524 hw->wiphy->features |=
525 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
526 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
527 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
528
529 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
530 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
531 hw->chanctx_data_size = sizeof(u16);
532
533 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
534 BIT(NL80211_IFTYPE_P2P_CLIENT) |
535 BIT(NL80211_IFTYPE_AP) |
536 BIT(NL80211_IFTYPE_P2P_GO) |
537 BIT(NL80211_IFTYPE_P2P_DEVICE) |
538 BIT(NL80211_IFTYPE_ADHOC);
539
540 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
541 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
542 if (iwl_mvm_is_lar_supported(mvm))
543 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
544 else
545 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
546 REGULATORY_DISABLE_BEACON_HINTS;
547
548 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
549 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
550
551 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
552 hw->wiphy->n_iface_combinations =
553 ARRAY_SIZE(iwl_mvm_iface_combinations);
554
555 hw->wiphy->max_remain_on_channel_duration = 10000;
556 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
557 /* we can compensate an offset of up to 3 channels = 15 MHz */
558 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
559
560 /* Extract MAC address */
561 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
562 hw->wiphy->addresses = mvm->addresses;
563 hw->wiphy->n_addresses = 1;
564
565 /* Extract additional MAC addresses if available */
566 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
567 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
568
569 for (i = 1; i < num_mac; i++) {
570 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
571 ETH_ALEN);
572 mvm->addresses[i].addr[5]++;
573 hw->wiphy->n_addresses++;
574 }
575
576 iwl_mvm_reset_phy_ctxts(mvm);
577
578 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
579
580 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
581
582 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
583 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
584 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
585
586 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
587 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
588 else
589 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
590
591 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
592 hw->wiphy->bands[NL80211_BAND_2GHZ] =
593 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
594 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
595 hw->wiphy->bands[NL80211_BAND_5GHZ] =
596 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
597
598 if (fw_has_capa(&mvm->fw->ucode_capa,
599 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
600 fw_has_api(&mvm->fw->ucode_capa,
601 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
602 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
603 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
604 }
605
606 hw->wiphy->hw_version = mvm->trans->hw_id;
607
608 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
609 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
610 else
611 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
612
613 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
614 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
615 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
616 /* we create the 802.11 header and zero length SSID IE. */
617 hw->wiphy->max_sched_scan_ie_len =
618 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
619 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
620 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
621
622 /*
623 * The firmware uses a u8 for the number of iterations, but 0xff is
624 * reserved for an infinite loop, so the maximum is actually 254.
625 */
626 hw->wiphy->max_sched_scan_plan_iterations = 254;
627
628 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
629 NL80211_FEATURE_LOW_PRIORITY_SCAN |
630 NL80211_FEATURE_P2P_GO_OPPPS |
631 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
632 NL80211_FEATURE_DYNAMIC_SMPS |
633 NL80211_FEATURE_STATIC_SMPS |
634 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
635
636 if (fw_has_capa(&mvm->fw->ucode_capa,
637 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
638 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
639 if (fw_has_capa(&mvm->fw->ucode_capa,
640 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
641 hw->wiphy->features |= NL80211_FEATURE_QUIET;
642
643 if (fw_has_capa(&mvm->fw->ucode_capa,
644 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
645 hw->wiphy->features |=
646 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
647
648 if (fw_has_capa(&mvm->fw->ucode_capa,
649 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
650 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
651
652 if (fw_has_api(&mvm->fw->ucode_capa,
653 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
654 wiphy_ext_feature_set(hw->wiphy,
655 NL80211_EXT_FEATURE_SCAN_START_TIME);
656 wiphy_ext_feature_set(hw->wiphy,
657 NL80211_EXT_FEATURE_BSS_PARENT_TSF);
658 wiphy_ext_feature_set(hw->wiphy,
659 NL80211_EXT_FEATURE_SET_SCAN_DWELL);
660 }
661
662 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
663
664 #ifdef CONFIG_PM_SLEEP
665 if (iwl_mvm_is_d0i3_supported(mvm) &&
666 device_can_wakeup(mvm->trans->dev)) {
667 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
668 hw->wiphy->wowlan = &mvm->wowlan;
669 }
670
671 if (mvm->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
672 mvm->trans->ops->d3_suspend &&
673 mvm->trans->ops->d3_resume &&
674 device_can_wakeup(mvm->trans->dev)) {
675 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
676 WIPHY_WOWLAN_DISCONNECT |
677 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
678 WIPHY_WOWLAN_RFKILL_RELEASE |
679 WIPHY_WOWLAN_NET_DETECT;
680 if (!iwlwifi_mod_params.sw_crypto)
681 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
682 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
683 WIPHY_WOWLAN_4WAY_HANDSHAKE;
684
685 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
686 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
687 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
688 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
689 mvm->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
690 hw->wiphy->wowlan = &mvm->wowlan;
691 }
692 #endif
693
694 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
695 /* assign default bcast filtering configuration */
696 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
697 #endif
698
699 ret = iwl_mvm_leds_init(mvm);
700 if (ret)
701 return ret;
702
703 if (fw_has_capa(&mvm->fw->ucode_capa,
704 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
705 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
706 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
707 ieee80211_hw_set(hw, TDLS_WIDER_BW);
708 }
709
710 if (fw_has_capa(&mvm->fw->ucode_capa,
711 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
712 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
713 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
714 }
715
716 hw->netdev_features |= mvm->cfg->features;
717 if (!iwl_mvm_is_csum_supported(mvm)) {
718 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
719 NETIF_F_RXCSUM);
720 /* We may support SW TX CSUM */
721 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
722 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
723 }
724
725 ret = ieee80211_register_hw(mvm->hw);
726 if (ret)
727 iwl_mvm_leds_exit(mvm);
728
729 if (mvm->cfg->vht_mu_mimo_supported)
730 wiphy_ext_feature_set(hw->wiphy,
731 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
732
733 return ret;
734 }
735
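/*
 * While in d0i3, frames destined for the d0i3 AP station are queued on
 * d0i3_tx and transmitted on d0i3 exit; taking and immediately
 * releasing a TX reference here just triggers the wakeup.
 */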
736 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
737 struct ieee80211_sta *sta,
738 struct sk_buff *skb)
739 {
740 struct iwl_mvm_sta *mvmsta;
741 bool defer = false;
742
743 /*
744 * double check the IN_D0I3 flag both before and after
745 * taking the spinlock, in order to prevent taking
746 * the spinlock when not needed.
747 */
748 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
749 return false;
750
751 spin_lock(&mvm->d0i3_tx_lock);
752 /*
753 * testing the flag again ensures the skb dequeue
754 * loop (on d0i3 exit) hasn't run yet.
755 */
756 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
757 goto out;
758
759 mvmsta = iwl_mvm_sta_from_mac80211(sta);
760 if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
761 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
762 goto out;
763
764 __skb_queue_tail(&mvm->d0i3_tx, skb);
765 ieee80211_stop_queues(mvm->hw);
766
767 /* trigger wakeup */
768 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
769 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
770
771 defer = true;
772 out:
773 spin_unlock(&mvm->d0i3_tx_lock);
774 return defer;
775 }
776
777 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
778 struct ieee80211_tx_control *control,
779 struct sk_buff *skb)
780 {
781 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
782 struct ieee80211_sta *sta = control->sta;
783 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
784 struct ieee80211_hdr *hdr = (void *)skb->data;
785
786 if (iwl_mvm_is_radio_killed(mvm)) {
787 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
788 goto drop;
789 }
790
791 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
792 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
793 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
794 goto drop;
795
796 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
797 if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
798 ieee80211_is_mgmt(hdr->frame_control) &&
799 !ieee80211_is_deauth(hdr->frame_control) &&
800 !ieee80211_is_disassoc(hdr->frame_control) &&
801 !ieee80211_is_action(hdr->frame_control)))
802 sta = NULL;
803
804 if (sta) {
805 if (iwl_mvm_defer_tx(mvm, sta, skb))
806 return;
807 if (iwl_mvm_tx_skb(mvm, skb, sta))
808 goto drop;
809 return;
810 }
811
812 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
813 goto drop;
814 return;
815 drop:
816 ieee80211_free_txskb(hw, skb);
817 }
818
819 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
820 {
821 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
822 return false;
823 return true;
824 }
825
826 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
827 {
828 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
829 return false;
830 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
831 return true;
832
833 /* enabled by default */
834 return true;
835 }
836
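/* Collect the firmware BA debug trigger only if the given TID is set in the trigger's TID bitmap. */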
837 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
838 do { \
839 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
840 break; \
841 iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt); \
842 } while (0)
843
844 static void
845 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
846 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
847 enum ieee80211_ampdu_mlme_action action)
848 {
849 struct iwl_fw_dbg_trigger_tlv *trig;
850 struct iwl_fw_dbg_trigger_ba *ba_trig;
851
852 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
853 return;
854
855 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
856 ba_trig = (void *)trig->data;
857
858 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
859 return;
860
861 switch (action) {
862 case IEEE80211_AMPDU_TX_OPERATIONAL: {
863 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
864 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
865
866 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
867 "TX AGG START: MAC %pM tid %d ssn %d\n",
868 sta->addr, tid, tid_data->ssn);
869 break;
870 }
871 case IEEE80211_AMPDU_TX_STOP_CONT:
872 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
873 "TX AGG STOP: MAC %pM tid %d\n",
874 sta->addr, tid);
875 break;
876 case IEEE80211_AMPDU_RX_START:
877 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
878 "RX AGG START: MAC %pM tid %d ssn %d\n",
879 sta->addr, tid, rx_ba_ssn);
880 break;
881 case IEEE80211_AMPDU_RX_STOP:
882 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
883 "RX AGG STOP: MAC %pM tid %d\n",
884 sta->addr, tid);
885 break;
886 default:
887 break;
888 }
889 }
890
891 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
892 struct ieee80211_vif *vif,
893 struct ieee80211_ampdu_params *params)
894 {
895 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
896 int ret;
897 bool tx_agg_ref = false;
898 struct ieee80211_sta *sta = params->sta;
899 enum ieee80211_ampdu_mlme_action action = params->action;
900 u16 tid = params->tid;
901 u16 *ssn = ¶ms->ssn;
902 u8 buf_size = params->buf_size;
903 bool amsdu = params->amsdu;
904 u16 timeout = params->timeout;
905
906 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
907 sta->addr, tid, action);
908
909 if (!(mvm->nvm_data->sku_cap_11n_enable))
910 return -EACCES;
911
912 /* return from D0i3 before starting a new Tx aggregation */
913 switch (action) {
914 case IEEE80211_AMPDU_TX_START:
915 case IEEE80211_AMPDU_TX_STOP_CONT:
916 case IEEE80211_AMPDU_TX_STOP_FLUSH:
917 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
918 case IEEE80211_AMPDU_TX_OPERATIONAL:
919 /*
920 * for tx start, wait synchronously until D0i3 exit to
921 * get the correct sequence number for the tid.
922 * additionally, some other ampdu actions use direct
923 * target access, which is not handled automatically
924 * by the trans layer (unlike commands), so wait for
925 * d0i3 exit in these cases as well.
926 */
927 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
928 if (ret)
929 return ret;
930
931 tx_agg_ref = true;
932 break;
933 default:
934 break;
935 }
936
937 mutex_lock(&mvm->mutex);
938
939 switch (action) {
940 case IEEE80211_AMPDU_RX_START:
941 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
942 ret = -EINVAL;
943 break;
944 }
945 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
946 timeout);
947 break;
948 case IEEE80211_AMPDU_RX_STOP:
949 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
950 timeout);
951 break;
952 case IEEE80211_AMPDU_TX_START:
953 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
954 ret = -EINVAL;
955 break;
956 }
957 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
958 break;
959 case IEEE80211_AMPDU_TX_STOP_CONT:
960 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
961 break;
962 case IEEE80211_AMPDU_TX_STOP_FLUSH:
963 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
964 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
965 break;
966 case IEEE80211_AMPDU_TX_OPERATIONAL:
967 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
968 buf_size, amsdu);
969 break;
970 default:
971 WARN_ON_ONCE(1);
972 ret = -EINVAL;
973 break;
974 }
975
976 if (!ret) {
977 u16 rx_ba_ssn = 0;
978
979 if (action == IEEE80211_AMPDU_RX_START)
980 rx_ba_ssn = *ssn;
981
982 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
983 rx_ba_ssn, action);
984 }
985 mutex_unlock(&mvm->mutex);
986
987 /*
988 * If the tid is marked as started, we won't use it for offloaded
989 * traffic on the next D0i3 entry. It's safe to unref.
990 */
991 if (tx_agg_ref)
992 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
993
994 return ret;
995 }
996
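/*
 * Reset per-vif driver state (uploaded flag, AP station id, time event
 * data, PHY context and beacon filtering data) during restart cleanup.
 */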
997 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
998 struct ieee80211_vif *vif)
999 {
1000 struct iwl_mvm *mvm = data;
1001 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1002
1003 mvmvif->uploaded = false;
1004 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1005
1006 spin_lock_bh(&mvm->time_event_lock);
1007 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
1008 spin_unlock_bh(&mvm->time_event_lock);
1009
1010 mvmvif->phy_ctxt = NULL;
1011 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
1012 }
1013
1014 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1015 {
1016 /* Clear the D3 reconfig flag; we only need it to avoid dumping a
1017 * firmware coredump on reconfiguration, and we shouldn't do that
1018 * on a D3->D0 transition.
1019 */
1020 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1021 mvm->fw_dump_desc = &iwl_mvm_dump_desc_assert;
1022 iwl_mvm_fw_error_dump(mvm);
1023 }
1024
1025 /* cleanup all stale references (scan, roc), but keep the
1026 * ucode_down ref until reconfig is complete
1027 */
1028 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1029
1030 iwl_mvm_stop_device(mvm);
1031
1032 mvm->scan_status = 0;
1033 mvm->ps_disabled = false;
1034 mvm->calibrating = false;
1035
1036 /* just in case one was running */
1037 iwl_mvm_cleanup_roc_te(mvm);
1038 ieee80211_remain_on_channel_expired(mvm->hw);
1039
1040 /*
1041 * cleanup all interfaces, even inactive ones, as some might have
1042 * gone down during the HW restart
1043 */
1044 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1045
1046 mvm->p2p_device_vif = NULL;
1047 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1048
1049 iwl_mvm_reset_phy_ctxts(mvm);
1050 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1051 memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
1052 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1053 memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
1054 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1055 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1056
1057 ieee80211_wake_queues(mvm->hw);
1058
1059 /* clear any stale d0i3 state */
1060 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1061
1062 mvm->vif_count = 0;
1063 mvm->rx_ba_sessions = 0;
1064 mvm->fw_dbg_conf = FW_DBG_INVALID;
1065
1066 /* keep statistics ticking */
1067 iwl_mvm_accu_radio_stats(mvm);
1068 }
1069
1070 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1071 {
1072 int ret;
1073
1074 lockdep_assert_held(&mvm->mutex);
1075
1076 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1077 /* Clean up some internal and mac80211 state on restart */
1078 iwl_mvm_restart_cleanup(mvm);
1079 } else {
1080 /* Hold the reference to prevent runtime suspend while
1081 * the start procedure runs. It's a bit confusing
1082 * that the UCODE_DOWN reference is taken, but it just
1083 * means "UCODE is not UP yet". ( TODO: rename this
1084 * reference).
1085 */
1086 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1087 }
1088 ret = iwl_mvm_up(mvm);
1089
1090 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1091 /* Something went wrong - we need to finish some cleanup
1092 * that normally iwl_mvm_mac_restart_complete() below
1093 * would do.
1094 */
1095 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1096 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1097 }
1098
1099 return ret;
1100 }
1101
1102 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1103 {
1104 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1105 int ret;
1106
1107 /* Some hw restart cleanups must not hold the mutex */
1108 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1109 /*
1110 * Make sure we are out of d0i3. This is needed
1111 * to make sure the reference accounting is correct
1112 * (and there is no stale d0i3_exit_work).
1113 */
1114 wait_event_timeout(mvm->d0i3_exit_waitq,
1115 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1116 &mvm->status),
1117 HZ);
1118 }
1119
1120 mutex_lock(&mvm->mutex);
1121 ret = __iwl_mvm_mac_start(mvm);
1122 mutex_unlock(&mvm->mutex);
1123
1124 return ret;
1125 }
1126
1127 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1128 {
1129 int ret;
1130
1131 mutex_lock(&mvm->mutex);
1132
1133 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1134 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1135 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1136 if (ret)
1137 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1138 ret);
1139
1140 /* allow transport/FW low power modes */
1141 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1142
1143 /*
1144 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1145 * of packets the FW sent out, so we must reconnect.
1146 */
1147 iwl_mvm_teardown_tdls_peers(mvm);
1148
1149 mutex_unlock(&mvm->mutex);
1150 }
1151
1152 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1153 {
1154 if (iwl_mvm_is_d0i3_supported(mvm) &&
1155 iwl_mvm_enter_d0i3_on_suspend(mvm))
1156 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1157 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1158 &mvm->status),
1159 HZ),
1160 "D0i3 exit on resume timed out\n");
1161 }
1162
1163 static void
1164 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1165 enum ieee80211_reconfig_type reconfig_type)
1166 {
1167 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1168
1169 switch (reconfig_type) {
1170 case IEEE80211_RECONFIG_TYPE_RESTART:
1171 iwl_mvm_restart_complete(mvm);
1172 break;
1173 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1174 iwl_mvm_resume_complete(mvm);
1175 break;
1176 }
1177 }
1178
1179 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1180 {
1181 lockdep_assert_held(&mvm->mutex);
1182
1183 /* firmware counters are obviously reset now, but we shouldn't
1184 * partially track them, so also clear the accumulated radio stats.
1185 */
1186 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1187
1188 /* async_handlers_wk is now blocked */
1189
1190 /*
1191 * The work item could be running or queued if the
1192 * ROC time event stops just as we get here.
1193 */
1194 flush_work(&mvm->roc_done_wk);
1195
1196 iwl_mvm_stop_device(mvm);
1197
1198 iwl_mvm_async_handlers_purge(mvm);
1199 /* async_handlers_list is empty and will stay empty: HW is stopped */
1200
1201 /* the fw is stopped, the aux sta is dead: clean up driver state */
1202 iwl_mvm_del_aux_sta(mvm);
1203
1204 iwl_free_fw_paging(mvm);
1205
1206 /*
1207 * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
1208 * won't be called in this case).
1209 * But make sure to cleanup interfaces that have gone down before/during
1210 * HW restart was requested.
1211 */
1212 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1213 ieee80211_iterate_interfaces(mvm->hw, 0,
1214 iwl_mvm_cleanup_iterator, mvm);
1215
1216 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1217 * make sure there's nothing left there and warn if any is found.
1218 */
1219 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1220 int i;
1221
1222 for (i = 0; i < mvm->max_scans; i++) {
1223 if (WARN_ONCE(mvm->scan_uid_status[i],
1224 "UMAC scan UID %d status was not cleaned\n",
1225 i))
1226 mvm->scan_uid_status[i] = 0;
1227 }
1228 }
1229 }
1230
1231 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1232 {
1233 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1234
1235 flush_work(&mvm->d0i3_exit_work);
1236 flush_work(&mvm->async_handlers_wk);
1237 flush_work(&mvm->add_stream_wk);
1238 cancel_delayed_work_sync(&mvm->fw_dump_wk);
1239 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1240 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1241 iwl_mvm_free_fw_dump_desc(mvm);
1242
1243 mutex_lock(&mvm->mutex);
1244 __iwl_mvm_mac_stop(mvm);
1245 mutex_unlock(&mvm->mutex);
1246
1247 /*
1248 * The worker might have been waiting for the mutex, let it run and
1249 * discover that its list is now empty.
1250 */
1251 cancel_work_sync(&mvm->async_handlers_wk);
1252 }
1253
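/* Return the first PHY context with no references, or NULL if all are in use. */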
1254 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1255 {
1256 u16 i;
1257
1258 lockdep_assert_held(&mvm->mutex);
1259
1260 for (i = 0; i < NUM_PHY_CTX; i++)
1261 if (!mvm->phy_ctxts[i].ref)
1262 return &mvm->phy_ctxts[i];
1263
1264 IWL_ERR(mvm, "No available PHY context\n");
1265 return NULL;
1266 }
1267
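/*
 * Send a REDUCE_TX_POWER_CMD restricting the vif's TX power. The value
 * is presumably carried in 1/8 dBm units (hence the * 8 scaling), with
 * IWL_DEFAULT_MAX_TX_POWER mapped to the device maximum.
 */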
1268 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1269 s16 tx_power)
1270 {
1271 struct iwl_dev_tx_power_cmd cmd = {
1272 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1273 .v3.mac_context_id =
1274 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1275 .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
1276 };
1277 int len = sizeof(cmd);
1278
1279 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1280 cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1281
1282 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1283 len = sizeof(cmd.v3);
1284
1285 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1286 }
1287
1288 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1289 struct ieee80211_vif *vif)
1290 {
1291 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1292 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1293 int ret;
1294
1295 mvmvif->mvm = mvm;
1296
1297 /*
1298 * make sure D0i3 exit is completed, otherwise a target access
1299 * during tx queue configuration could be done when still in
1300 * D0i3 state.
1301 */
1302 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1303 if (ret)
1304 return ret;
1305
1306 /*
1307 * Not much to do here. The stack will not allow interface
1308 * types or combinations that we didn't advertise, so we
1309 * don't really have to check the types.
1310 */
1311
1312 mutex_lock(&mvm->mutex);
1313
1314 /* make sure that beacon statistics don't go backwards with FW reset */
1315 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1316 mvmvif->beacon_stats.accu_num_beacons +=
1317 mvmvif->beacon_stats.num_beacons;
1318
1319 /* Allocate resources for the MAC context, and add it to the fw */
1320 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1321 if (ret)
1322 goto out_unlock;
1323
1324 /* Counting number of interfaces is needed for legacy PM */
1325 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1326 mvm->vif_count++;
1327
1328 /*
1329 * The AP binding flow can be done only after the beacon
1330 * template is configured (which happens only in the mac80211
1331 * start_ap() flow), and adding the broadcast station can happen
1332 * only after the binding.
1333 * In addition, since modifying the MAC before adding a bcast
1334 * station is not allowed by the FW, delay the adding of MAC context to
1335 * the point where we can also add the bcast station.
1336 * In short: there's not much we can do at this point, other than
1337 * allocating resources :)
1338 */
1339 if (vif->type == NL80211_IFTYPE_AP ||
1340 vif->type == NL80211_IFTYPE_ADHOC) {
1341 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1342 if (ret) {
1343 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1344 goto out_release;
1345 }
1346
1347 iwl_mvm_vif_dbgfs_register(mvm, vif);
1348 goto out_unlock;
1349 }
1350
1351 mvmvif->features |= hw->netdev_features;
1352
1353 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1354 if (ret)
1355 goto out_release;
1356
1357 ret = iwl_mvm_power_update_mac(mvm);
1358 if (ret)
1359 goto out_remove_mac;
1360
1361 /* beacon filtering */
1362 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1363 if (ret)
1364 goto out_remove_mac;
1365
1366 if (!mvm->bf_allowed_vif &&
1367 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1368 mvm->bf_allowed_vif = mvmvif;
1369 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1370 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1371 }
1372
1373 /*
1374 * P2P_DEVICE interface does not have a channel context assigned to it,
1375 * so a dedicated PHY context is allocated to it and the corresponding
1376 * MAC context is bound to it at this stage.
1377 */
1378 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1379
1380 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1381 if (!mvmvif->phy_ctxt) {
1382 ret = -ENOSPC;
1383 goto out_free_bf;
1384 }
1385
1386 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1387 ret = iwl_mvm_binding_add_vif(mvm, vif);
1388 if (ret)
1389 goto out_unref_phy;
1390
1391 ret = iwl_mvm_add_bcast_sta(mvm, vif);
1392 if (ret)
1393 goto out_unbind;
1394
1395 /* Save a pointer to p2p device vif, so it can later be used to
1396 * update the p2p device MAC when a GO is started/stopped */
1397 mvm->p2p_device_vif = vif;
1398 }
1399
1400 iwl_mvm_vif_dbgfs_register(mvm, vif);
1401 goto out_unlock;
1402
1403 out_unbind:
1404 iwl_mvm_binding_remove_vif(mvm, vif);
1405 out_unref_phy:
1406 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1407 out_free_bf:
1408 if (mvm->bf_allowed_vif == mvmvif) {
1409 mvm->bf_allowed_vif = NULL;
1410 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1411 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1412 }
1413 out_remove_mac:
1414 mvmvif->phy_ctxt = NULL;
1415 iwl_mvm_mac_ctxt_remove(mvm, vif);
1416 out_release:
1417 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1418 mvm->vif_count--;
1419
1420 iwl_mvm_mac_ctxt_release(mvm, vif);
1421 out_unlock:
1422 mutex_unlock(&mvm->mutex);
1423
1424 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1425
1426 return ret;
1427 }
1428
1429 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1430 struct ieee80211_vif *vif)
1431 {
1432 u32 tfd_msk = iwl_mvm_mac_get_queues_mask(vif);
1433
1434 if (tfd_msk) {
1435 /*
1436 * mac80211 first removes all the stations of the vif and
1437 * then removes the vif. When it removes a station it also
1438 * flushes the AMPDU session. So by now, all the AMPDU sessions
1439 * of all the stations of this vif are closed, and the queues
1440 * of these AMPDU sessions are properly closed.
1441 * We still need to take care of the shared queues of the vif.
1442 * Flush them here.
1443 */
1444 mutex_lock(&mvm->mutex);
1445 iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
1446 mutex_unlock(&mvm->mutex);
1447
1448 /*
1449 * There are transports that buffer a few frames in the host.
1450 * For these, the flush above isn't enough since while we were
1451 * flushing, the transport might have sent more frames to the
1452 * device. To solve this, wait here until the transport is
1453 * empty. Technically, this could have replaced the flush
1454 * above, but flush is much faster than draining. So flush
1455 * first, and drain to make sure we have no frames in the
1456 * transport anymore.
1457 * If a station still had frames on the shared queues, it is
1458 * already marked as draining, so to complete the draining, we
1459 * just need to wait until the transport is empty.
1460 */
1461 iwl_trans_wait_tx_queue_empty(mvm->trans, tfd_msk);
1462 }
1463
1464 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1465 /*
1466 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1467 * We assume here that all the packets sent to the OFFCHANNEL
1468 * queue are sent in ROC session.
1469 */
1470 flush_work(&mvm->roc_done_wk);
1471 } else {
1472 /*
1473 * By now, all the AC queues are empty. The AGG queues are
1474 * empty too. We already got all the Tx responses for all the
1475 * packets in the queues. The drain work can have been
1476 * triggered. Flush it.
1477 */
1478 flush_work(&mvm->sta_drained_wk);
1479 }
1480 }
1481
1482 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1483 struct ieee80211_vif *vif)
1484 {
1485 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1486 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1487
1488 iwl_mvm_prepare_mac_removal(mvm, vif);
1489
1490 mutex_lock(&mvm->mutex);
1491
1492 if (mvm->bf_allowed_vif == mvmvif) {
1493 mvm->bf_allowed_vif = NULL;
1494 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1495 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1496 }
1497
1498 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1499
1500 /*
1501 * For AP/GO interface, the tear down of the resources allocated to the
1502 * interface is handled as part of the stop_ap flow.
1503 */
1504 if (vif->type == NL80211_IFTYPE_AP ||
1505 vif->type == NL80211_IFTYPE_ADHOC) {
1506 #ifdef CONFIG_NL80211_TESTMODE
1507 if (vif == mvm->noa_vif) {
1508 mvm->noa_vif = NULL;
1509 mvm->noa_duration = 0;
1510 }
1511 #endif
1512 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1513 goto out_release;
1514 }
1515
1516 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1517 mvm->p2p_device_vif = NULL;
1518 iwl_mvm_rm_bcast_sta(mvm, vif);
1519 iwl_mvm_binding_remove_vif(mvm, vif);
1520 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1521 mvmvif->phy_ctxt = NULL;
1522 }
1523
1524 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1525 mvm->vif_count--;
1526
1527 iwl_mvm_power_update_mac(mvm);
1528 iwl_mvm_mac_ctxt_remove(mvm, vif);
1529
1530 out_release:
1531 iwl_mvm_mac_ctxt_release(mvm, vif);
1532 mutex_unlock(&mvm->mutex);
1533 }
1534
1535 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1536 {
1537 return 0;
1538 }
1539
1540 struct iwl_mvm_mc_iter_data {
1541 struct iwl_mvm *mvm;
1542 int port_id;
1543 };
1544
1545 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1546 struct ieee80211_vif *vif)
1547 {
1548 struct iwl_mvm_mc_iter_data *data = _data;
1549 struct iwl_mvm *mvm = data->mvm;
1550 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1551 struct iwl_host_cmd hcmd = {
1552 .id = MCAST_FILTER_CMD,
1553 .flags = CMD_ASYNC,
1554 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1555 };
1556 int ret, len;
1557
1558 /* if we don't have free ports, mcast frames will be dropped */
1559 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1560 return;
1561
1562 if (vif->type != NL80211_IFTYPE_STATION ||
1563 !vif->bss_conf.assoc)
1564 return;
1565
1566 cmd->port_id = data->port_id++;
1567 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1568 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1569
1570 hcmd.len[0] = len;
1571 hcmd.data[0] = cmd;
1572
1573 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1574 if (ret)
1575 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1576 }
1577
1578 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1579 {
1580 struct iwl_mvm_mc_iter_data iter_data = {
1581 .mvm = mvm,
1582 };
1583
1584 lockdep_assert_held(&mvm->mutex);
1585
1586 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1587 return;
1588
1589 ieee80211_iterate_active_interfaces_atomic(
1590 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1591 iwl_mvm_mc_iface_iterator, &iter_data);
1592 }
1593
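/*
 * Build an MCAST_FILTER_CMD from mac80211's multicast list. The
 * allocated command is handed back to configure_filter() through the
 * opaque u64 "multicast" cookie; if the list is too long (or pass-all
 * is forced), an otherwise empty command with pass_all set is returned.
 */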
1594 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1595 struct netdev_hw_addr_list *mc_list)
1596 {
1597 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1598 struct iwl_mcast_filter_cmd *cmd;
1599 struct netdev_hw_addr *addr;
1600 int addr_count;
1601 bool pass_all;
1602 int len;
1603
1604 addr_count = netdev_hw_addr_list_count(mc_list);
1605 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1606 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1607 if (pass_all)
1608 addr_count = 0;
1609
1610 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1611 cmd = kzalloc(len, GFP_ATOMIC);
1612 if (!cmd)
1613 return 0;
1614
1615 if (pass_all) {
1616 cmd->pass_all = 1;
1617 return (u64)(unsigned long)cmd;
1618 }
1619
1620 netdev_hw_addr_list_for_each(addr, mc_list) {
1621 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1622 cmd->count, addr->addr);
1623 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1624 addr->addr, ETH_ALEN);
1625 cmd->count++;
1626 }
1627
1628 return (u64)(unsigned long)cmd;
1629 }
1630
1631 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1632 unsigned int changed_flags,
1633 unsigned int *total_flags,
1634 u64 multicast)
1635 {
1636 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1637 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1638
1639 mutex_lock(&mvm->mutex);
1640
1641 /* replace previous configuration */
1642 kfree(mvm->mcast_filter_cmd);
1643 mvm->mcast_filter_cmd = cmd;
1644
1645 if (!cmd)
1646 goto out;
1647
1648 iwl_mvm_recalc_multicast(mvm);
1649 out:
1650 mutex_unlock(&mvm->mutex);
1651 *total_flags = 0;
1652 }
1653
1654 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1655 struct ieee80211_vif *vif,
1656 unsigned int filter_flags,
1657 unsigned int changed_flags)
1658 {
1659 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1660
1661 /* We support only filter for probe requests */
1662 if (!(changed_flags & FIF_PROBE_REQ))
1663 return;
1664
1665 /* Supported only for p2p client interfaces */
1666 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1667 !vif->p2p)
1668 return;
1669
1670 mutex_lock(&mvm->mutex);
1671 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1672 mutex_unlock(&mvm->mutex);
1673 }
1674
1675 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1676 struct iwl_bcast_iter_data {
1677 struct iwl_mvm *mvm;
1678 struct iwl_bcast_filter_cmd *cmd;
1679 u8 current_filter;
1680 };
1681
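/*
 * Copy a template broadcast filter and resolve the BC_FILTER_MAGIC_*
 * placeholders in its attributes (vif IP address / MAC LSBs) for the
 * given vif; attributes that cannot be resolved (e.g. no single IP
 * address) are disabled.
 */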
1682 static void
1683 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1684 const struct iwl_fw_bcast_filter *in_filter,
1685 struct iwl_fw_bcast_filter *out_filter)
1686 {
1687 struct iwl_fw_bcast_filter_attr *attr;
1688 int i;
1689
1690 memcpy(out_filter, in_filter, sizeof(*out_filter));
1691
1692 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1693 attr = &out_filter->attrs[i];
1694
1695 if (!attr->mask)
1696 break;
1697
1698 switch (attr->reserved1) {
1699 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1700 if (vif->bss_conf.arp_addr_cnt != 1) {
1701 attr->mask = 0;
1702 continue;
1703 }
1704
1705 attr->val = vif->bss_conf.arp_addr_list[0];
1706 break;
1707 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1708 attr->val = *(__be32 *)&vif->addr[2];
1709 break;
1710 default:
1711 break;
1712 }
1713 attr->reserved1 = 0;
1714 out_filter->num_attrs++;
1715 }
1716 }
1717
1718 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1719 struct ieee80211_vif *vif)
1720 {
1721 struct iwl_bcast_iter_data *data = _data;
1722 struct iwl_mvm *mvm = data->mvm;
1723 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1724 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1725 struct iwl_fw_bcast_mac *bcast_mac;
1726 int i;
1727
1728 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1729 return;
1730
1731 bcast_mac = &cmd->macs[mvmvif->id];
1732
1733 /*
1734 * enable filtering only for associated stations, but not for P2P
1735 * Clients
1736 */
1737 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1738 !vif->bss_conf.assoc)
1739 return;
1740
1741 bcast_mac->default_discard = 1;
1742
1743 /* copy all configured filters */
1744 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1745 /*
1746 * Make sure we don't exceed our filters limit.
1747 * if there is still a valid filter to be configured,
1748 * be on the safe side and just allow bcast for this mac.
1749 */
1750 if (WARN_ON_ONCE(data->current_filter >=
1751 ARRAY_SIZE(cmd->filters))) {
1752 bcast_mac->default_discard = 0;
1753 bcast_mac->attached_filters = 0;
1754 break;
1755 }
1756
1757 iwl_mvm_set_bcast_filter(vif,
1758 &mvm->bcast_filters[i],
1759 &cmd->filters[data->current_filter]);
1760
1761 /* skip current filter if it contains no attributes */
1762 if (!cmd->filters[data->current_filter].num_attrs)
1763 continue;
1764
1765 /* attach the filter to current mac */
1766 bcast_mac->attached_filters |=
1767 cpu_to_le16(BIT(data->current_filter));
1768
1769 data->current_filter++;
1770 }
1771 }
1772
1773 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1774 struct iwl_bcast_filter_cmd *cmd)
1775 {
1776 struct iwl_bcast_iter_data iter_data = {
1777 .mvm = mvm,
1778 .cmd = cmd,
1779 };
1780
1781 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1782 return false;
1783
1784 memset(cmd, 0, sizeof(*cmd));
1785 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1786 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1787
1788 #ifdef CONFIG_IWLWIFI_DEBUGFS
1789 /* use debugfs filters/macs if override is configured */
1790 if (mvm->dbgfs_bcast_filtering.override) {
1791 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1792 sizeof(cmd->filters));
1793 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1794 sizeof(cmd->macs));
1795 return true;
1796 }
1797 #endif
1798
1799 /* if no filters are configured, do nothing */
1800 if (!mvm->bcast_filters)
1801 return false;
1802
1803 /* configure and attach these filters for each associated sta vif */
1804 ieee80211_iterate_active_interfaces(
1805 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1806 iwl_mvm_bcast_filter_iterator, &iter_data);
1807
1808 return true;
1809 }
1810
1811 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1812 {
1813 struct iwl_bcast_filter_cmd cmd;
1814
1815 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1816 return 0;
1817
1818 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1819 return 0;
1820
1821 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1822 sizeof(cmd), &cmd);
1823 }
1824 #else
1825 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1826 {
1827 return 0;
1828 }
1829 #endif
1830
1831 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1832 struct ieee80211_vif *vif)
1833 {
1834 struct iwl_mu_group_mgmt_cmd cmd = {};
1835
1836 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1837 WLAN_MEMBERSHIP_LEN);
1838 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1839 WLAN_USER_POSITION_LEN);
1840
1841 return iwl_mvm_send_cmd_pdu(mvm,
1842 WIDE_ID(DATA_PATH_GROUP,
1843 UPDATE_MU_GROUPS_CMD),
1844 0, sizeof(cmd), &cmd);
1845 }
1846
1847 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1848 struct ieee80211_vif *vif)
1849 {
1850 if (vif->mu_mimo_owner) {
1851 struct iwl_mu_group_mgmt_notif *notif = _data;
1852
1853 /*
1854 * MU-MIMO Group Id action frame is little endian. We treat
1855 * the data received from firmware as if it came from the
1856 * action frame, so no conversion is needed.
1857 */
1858 ieee80211_update_mu_groups(vif,
1859 (u8 *)&notif->membership_status,
1860 (u8 *)&notif->user_position);
1861 }
1862 }
1863
1864 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1865 struct iwl_rx_cmd_buffer *rxb)
1866 {
1867 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1868 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1869
1870 ieee80211_iterate_active_interfaces_atomic(
1871 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1872 iwl_mvm_mu_mimo_iface_iterator, notif);
1873 }
1874
1875 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1876 struct ieee80211_vif *vif,
1877 struct ieee80211_bss_conf *bss_conf,
1878 u32 changes)
1879 {
1880 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1881 int ret;
1882
1883 /*
1884 * Re-calculate the tsf id, as the master-slave relations depend on the
1885 * beacon interval, which was not known when the station interface was
1886 * added.
1887 */
1888 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
1889 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
1890
1891 if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
1892 mvmvif->lqm_active)
1893 iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
1894 0, 0);
1895
1896 /*
1897 * If we're not associated yet, take the (new) BSSID before associating
1898 * so the firmware knows. If we're already associated, then use the old
1899 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
1900 * branch for disassociation below.
1901 */
1902 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
1903 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1904
1905 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
1906 if (ret)
1907 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
1908
1909 /* after sending it once, adopt mac80211 data */
1910 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
1911 mvmvif->associated = bss_conf->assoc;
1912
1913 if (changes & BSS_CHANGED_ASSOC) {
1914 if (bss_conf->assoc) {
1915 /* clear statistics to get clean beacon counter */
1916 iwl_mvm_request_statistics(mvm, true);
1917 memset(&mvmvif->beacon_stats, 0,
1918 sizeof(mvmvif->beacon_stats));
1919
1920 /* add quota for this interface */
1921 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1922 if (ret) {
1923 IWL_ERR(mvm, "failed to update quotas\n");
1924 return;
1925 }
1926
1927 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
1928 &mvm->status)) {
1929 /*
1930 * If we're restarting then the firmware will
1931 * obviously have lost synchronisation with
1932 * the AP. It will attempt to synchronise by
1933 * itself, but we can make it more reliable by
1934 * scheduling a session protection time event.
1935 *
1936 * The firmware needs to receive a beacon to
1937 * catch up with synchronisation, use 110% of
1938 * the beacon interval.
1939 *
1940 * Set a large maximum delay to allow for more
1941 * than a single interface.
1942 */
1943 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
1944 iwl_mvm_protect_session(mvm, vif, dur, dur,
1945 5 * dur, false);
1946 }
1947
1948 iwl_mvm_sf_update(mvm, vif, false);
1949 iwl_mvm_power_vif_assoc(mvm, vif);
1950 if (vif->p2p) {
1951 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
1952 iwl_mvm_update_smps(mvm, vif,
1953 IWL_MVM_SMPS_REQ_PROT,
1954 IEEE80211_SMPS_DYNAMIC);
1955 }
1956 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
1957 /*
1958 * If update fails - SF might be running in associated
1959 * mode while disassociated - which is forbidden.
1960 */
1961 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
1962 "Failed to update SF upon disassociation\n");
1963
1964 /* remove AP station now that the MAC is unassoc */
1965 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
1966 if (ret)
1967 IWL_ERR(mvm, "failed to remove AP station\n");
1968
1969 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
1970 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
1971 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
1972 /* remove quota for this interface */
1973 ret = iwl_mvm_update_quotas(mvm, false, NULL);
1974 if (ret)
1975 IWL_ERR(mvm, "failed to update quotas\n");
1976
1977 if (vif->p2p)
1978 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
1979
1980 /* this will take the cleared BSSID from bss_conf */
1981 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1982 if (ret)
1983 IWL_ERR(mvm,
1984 "failed to update MAC %pM (clear after unassoc)\n",
1985 vif->addr);
1986 }
1987
1988 /*
1989 * The firmware tracks the MU-MIMO group on its own.
1990 * However, on HW restart we should restore this data.
1991 */
1992 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
1993 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
1994 ret = iwl_mvm_update_mu_groups(mvm, vif);
1995 if (ret)
1996 IWL_ERR(mvm,
1997 "failed to update VHT MU_MIMO groups\n");
1998 }
1999
2000 iwl_mvm_recalc_multicast(mvm);
2001 iwl_mvm_configure_bcast_filter(mvm);
2002
2003 /* reset rssi values */
2004 mvmvif->bf_data.ave_beacon_signal = 0;
2005
2006 iwl_mvm_bt_coex_vif_change(mvm);
2007 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2008 IEEE80211_SMPS_AUTOMATIC);
2009 if (fw_has_capa(&mvm->fw->ucode_capa,
2010 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2011 iwl_mvm_config_scan(mvm);
2012 } else if (changes & BSS_CHANGED_BEACON_INFO) {
2013 /*
2014 * We received a beacon _after_ association so
2015 * remove the session protection.
2016 */
2017 iwl_mvm_remove_time_event(mvm, mvmvif,
2018 &mvmvif->time_event_data);
2019 }
2020
2021 if (changes & BSS_CHANGED_BEACON_INFO) {
2022 iwl_mvm_sf_update(mvm, vif, false);
2023 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2024 }
2025
2026 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2027 /*
2028 * Send power command on every beacon change,
2029 * because we may not have enabled beacon abort yet.
2030 */
2031 BSS_CHANGED_BEACON_INFO)) {
2032 ret = iwl_mvm_power_update_mac(mvm);
2033 if (ret)
2034 IWL_ERR(mvm, "failed to update power mode\n");
2035 }
2036
2037 if (changes & BSS_CHANGED_TXPOWER) {
2038 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2039 bss_conf->txpower);
2040 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2041 }
2042
2043 if (changes & BSS_CHANGED_CQM) {
2044 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2045 /* reset cqm events tracking */
2046 mvmvif->bf_data.last_cqm_event = 0;
2047 if (mvmvif->bf_data.bf_enabled) {
2048 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2049 if (ret)
2050 IWL_ERR(mvm,
2051 "failed to update CQM thresholds\n");
2052 }
2053 }
2054
2055 if (changes & BSS_CHANGED_ARP_FILTER) {
2056 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2057 iwl_mvm_configure_bcast_filter(mvm);
2058 }
2059 }
2060
2061 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2062 struct ieee80211_vif *vif)
2063 {
2064 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2065 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2066 int ret;
2067
2068 /*
2069 * iwl_mvm_mac_ctxt_add() might read directly from the device
2070 * (the system time), so make sure it is available.
2071 */
2072 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2073 if (ret)
2074 return ret;
2075
2076 mutex_lock(&mvm->mutex);
2077
2078 /* Send the beacon template */
2079 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2080 if (ret)
2081 goto out_unlock;
2082
2083 /*
2084 * Re-calculate the tsf id, as the master-slave relations depend on the
2085 * beacon interval, which was not known when the AP interface was added.
2086 */
2087 if (vif->type == NL80211_IFTYPE_AP)
2088 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2089
2090 mvmvif->ap_assoc_sta_count = 0;
2091
2092 /* Add the mac context */
2093 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2094 if (ret)
2095 goto out_unlock;
2096
2097 /* Perform the binding */
2098 ret = iwl_mvm_binding_add_vif(mvm, vif);
2099 if (ret)
2100 goto out_remove;
2101
2102 /* Send the bcast station. At this stage the TBTT and DTIM time events
2103 * are added and applied to the scheduler */
2104 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2105 if (ret)
2106 goto out_unbind;
2107
2108 /* must be set before quota calculations */
2109 mvmvif->ap_ibss_active = true;
2110
2111 /* power update needs to be done before quotas */
2112 iwl_mvm_power_update_mac(mvm);
2113
2114 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2115 if (ret)
2116 goto out_quota_failed;
2117
2118 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2119 if (vif->p2p && mvm->p2p_device_vif)
2120 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2121
2122 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2123
2124 iwl_mvm_bt_coex_vif_change(mvm);
2125
2126 /* we don't support TDLS during DCM */
2127 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2128 iwl_mvm_teardown_tdls_peers(mvm);
2129
2130 goto out_unlock;
2131
2132 out_quota_failed:
2133 iwl_mvm_power_update_mac(mvm);
2134 mvmvif->ap_ibss_active = false;
2135 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2136 out_unbind:
2137 iwl_mvm_binding_remove_vif(mvm, vif);
2138 out_remove:
2139 iwl_mvm_mac_ctxt_remove(mvm, vif);
2140 out_unlock:
2141 mutex_unlock(&mvm->mutex);
2142 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2143 return ret;
2144 }
2145
2146 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2147 struct ieee80211_vif *vif)
2148 {
2149 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2150 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2151
2152 iwl_mvm_prepare_mac_removal(mvm, vif);
2153
2154 mutex_lock(&mvm->mutex);
2155
2156 /* Handle AP stop while in CSA */
2157 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2158 iwl_mvm_remove_time_event(mvm, mvmvif,
2159 &mvmvif->time_event_data);
2160 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2161 mvmvif->csa_countdown = false;
2162 }
2163
2164 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2165 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2166 mvm->csa_tx_block_bcn_timeout = 0;
2167 }
2168
2169 mvmvif->ap_ibss_active = false;
2170 mvm->ap_last_beacon_gp2 = 0;
2171
2172 iwl_mvm_bt_coex_vif_change(mvm);
2173
2174 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2175
2176 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2177 if (vif->p2p && mvm->p2p_device_vif)
2178 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2179
2180 iwl_mvm_update_quotas(mvm, false, NULL);
2181 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2182 iwl_mvm_binding_remove_vif(mvm, vif);
2183
2184 iwl_mvm_power_update_mac(mvm);
2185
2186 iwl_mvm_mac_ctxt_remove(mvm, vif);
2187
2188 mutex_unlock(&mvm->mutex);
2189 }
2190
2191 static void
2192 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2193 struct ieee80211_vif *vif,
2194 struct ieee80211_bss_conf *bss_conf,
2195 u32 changes)
2196 {
2197 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2198
2199 /* Changes will be applied when the AP/IBSS is started */
2200 if (!mvmvif->ap_ibss_active)
2201 return;
2202
2203 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2204 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2205 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2206 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2207
2208 /* Need to send a new beacon template to the FW */
2209 if (changes & BSS_CHANGED_BEACON &&
2210 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2211 IWL_WARN(mvm, "Failed updating beacon data\n");
2212
2213 if (changes & BSS_CHANGED_TXPOWER) {
2214 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2215 bss_conf->txpower);
2216 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2217 }
2218 }
2219
2220 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2221 struct ieee80211_vif *vif,
2222 struct ieee80211_bss_conf *bss_conf,
2223 u32 changes)
2224 {
2225 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2226
2227 /*
2228 * iwl_mvm_bss_info_changed_station() might call
2229 * iwl_mvm_protect_session(), which reads directly from
2230 * the device (the system time), so make sure it is available.
2231 */
2232 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2233 return;
2234
2235 mutex_lock(&mvm->mutex);
2236
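/*
 * Leaving the idle state (e.g. when a connection attempt starts): stop any
 * scheduled scan so it does not get in the way of the new activity.
 */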
2237 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2238 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2239
2240 switch (vif->type) {
2241 case NL80211_IFTYPE_STATION:
2242 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2243 break;
2244 case NL80211_IFTYPE_AP:
2245 case NL80211_IFTYPE_ADHOC:
2246 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2247 break;
2248 case NL80211_IFTYPE_MONITOR:
2249 if (changes & BSS_CHANGED_MU_GROUPS)
2250 iwl_mvm_update_mu_groups(mvm, vif);
2251 break;
2252 default:
2253 /* shouldn't happen */
2254 WARN_ON_ONCE(1);
2255 }
2256
2257 mutex_unlock(&mvm->mutex);
2258 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2259 }
2260
2261 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2262 struct ieee80211_vif *vif,
2263 struct ieee80211_scan_request *hw_req)
2264 {
2265 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2266 int ret;
2267
2268 if (hw_req->req.n_channels == 0 ||
2269 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2270 return -EINVAL;
2271
2272 mutex_lock(&mvm->mutex);
2273 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2274 mutex_unlock(&mvm->mutex);
2275
2276 return ret;
2277 }
2278
2279 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2280 struct ieee80211_vif *vif)
2281 {
2282 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2283
2284 mutex_lock(&mvm->mutex);
2285
2286 /* Due to a race condition, it's possible that mac80211 asks
2287 * us to stop a hw_scan when it's already stopped. This can
2288 * happen, for instance, if we stopped the scan ourselves,
2289 * called ieee80211_scan_completed() and the userspace called
2290 * cancel scan before ieee80211_scan_work() could run.
2291 * To handle that, simply return if the scan is not running.
2292 */
2293 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2294 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2295
2296 mutex_unlock(&mvm->mutex);
2297 }
2298
2299 static void
2300 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2301 struct ieee80211_sta *sta, u16 tids,
2302 int num_frames,
2303 enum ieee80211_frame_release_type reason,
2304 bool more_data)
2305 {
2306 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2307
2308 /* Called when we need to transmit (a) frame(s) from mac80211 */
2309
2310 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2311 tids, more_data, false);
2312 }
2313
2314 static void
2315 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2316 struct ieee80211_sta *sta, u16 tids,
2317 int num_frames,
2318 enum ieee80211_frame_release_type reason,
2319 bool more_data)
2320 {
2321 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2322
2323 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
2324
2325 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2326 tids, more_data, true);
2327 }
2328
2329 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2330 struct ieee80211_vif *vif,
2331 enum sta_notify_cmd cmd,
2332 struct ieee80211_sta *sta)
2333 {
2334 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2335 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2336 unsigned long txqs = 0, tids = 0;
2337 int tid;
2338
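/*
 * Build two bitmaps under the station lock: txqs collects the TX queues
 * used by this station's TIDs, tids collects the TIDs that still have
 * frames queued.
 */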
2339 spin_lock_bh(&mvmsta->lock);
2340 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2341 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2342
2343 if (!iwl_mvm_is_dqa_supported(mvm) &&
2344 tid_data->state != IWL_AGG_ON &&
2345 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
2346 continue;
2347
2348 __set_bit(tid_data->txq_id, &txqs);
2349
2350 if (iwl_mvm_tid_queued(tid_data) == 0)
2351 continue;
2352
2353 __set_bit(tid, &tids);
2354 }
2355
2356 switch (cmd) {
2357 case STA_NOTIFY_SLEEP:
2358 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
2359 ieee80211_sta_block_awake(hw, sta, true);
2360
2361 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2362 ieee80211_sta_set_buffered(sta, tid, true);
2363
2364 if (txqs)
2365 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2366 /*
2367 * The fw updates the STA to be asleep. Tx packets on the Tx
2368 * queues to this station will not be transmitted. The fw will
2369 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2370 */
2371 break;
2372 case STA_NOTIFY_AWAKE:
2373 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
2374 break;
2375
2376 if (txqs)
2377 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2378 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2379 break;
2380 default:
2381 break;
2382 }
2383 spin_unlock_bh(&mvmsta->lock);
2384 }
2385
2386 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2387 struct ieee80211_vif *vif,
2388 struct ieee80211_sta *sta)
2389 {
2390 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2391 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2392
2393 /*
2394 * This is called before mac80211 does RCU synchronisation,
2395 * so here we already invalidate our internal RCU-protected
2396 * station pointer. The rest of the code will thus no longer
2397 * be able to find the station this way, and we don't rely
2398 * on further RCU synchronisation after the sta_state()
2399 * callback deleted the station.
2400 */
2401 mutex_lock(&mvm->mutex);
2402 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2403 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2404 ERR_PTR(-ENOENT));
2405
2406 mutex_unlock(&mvm->mutex);
2407 }
2408
2409 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2410 const u8 *bssid)
2411 {
2412 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2413 return;
2414
2415 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2416 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2417 return;
2418 }
2419
2420 if (!vif->p2p &&
2421 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2422 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2423 return;
2424 }
2425
2426 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2427 }
2428
2429 static void
2430 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2431 struct ieee80211_vif *vif, u8 *peer_addr,
2432 enum nl80211_tdls_operation action)
2433 {
2434 struct iwl_fw_dbg_trigger_tlv *trig;
2435 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2436
2437 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2438 return;
2439
2440 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2441 tdls_trig = (void *)trig->data;
2442 if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
2443 return;
2444
2445 if (!(tdls_trig->action_bitmap & BIT(action)))
2446 return;
2447
2448 if (tdls_trig->peer_mode &&
2449 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2450 return;
2451
2452 iwl_mvm_fw_dbg_collect_trig(mvm, trig,
2453 "TDLS event occurred, peer %pM, action %d",
2454 peer_addr, action);
2455 }
2456
2457 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2458 struct iwl_mvm_sta *mvm_sta)
2459 {
2460 struct iwl_mvm_tid_data *tid_data;
2461 struct sk_buff *skb;
2462 int i;
2463
2464 spin_lock_bh(&mvm_sta->lock);
2465 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2466 tid_data = &mvm_sta->tid_data[i];
2467 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
2468 ieee80211_free_txskb(mvm->hw, skb);
2469 }
2470 spin_unlock_bh(&mvm_sta->lock);
2471 }
2472
2473 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2474 struct ieee80211_vif *vif,
2475 struct ieee80211_sta *sta,
2476 enum ieee80211_sta_state old_state,
2477 enum ieee80211_sta_state new_state)
2478 {
2479 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2480 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2481 int ret;
2482
2483 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2484 sta->addr, old_state, new_state);
2485
2486 /* this would be a mac80211 bug ... but don't crash */
2487 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2488 return -EINVAL;
2489
2490 /* if a STA is being removed, reuse its ID */
2491 flush_work(&mvm->sta_drained_wk);
2492
2493 /*
2494 * If we are in a STA removal flow and in DQA mode:
2495 *
2496 * This is after the sync_rcu part, so the queues have already been
2497 * flushed. No more TXs on their way in mac80211's path, and no more in
2498 * the queues.
2499 * Also, we won't be getting any new TX frames for this station.
2500 * What we might have are deferred TX frames that need to be taken care
2501 * of.
2502 *
2503 * Drop any still-queued deferred-frame before removing the STA, and
2504 * make sure the worker is no longer handling frames for this STA.
2505 */
2506 if (old_state == IEEE80211_STA_NONE &&
2507 new_state == IEEE80211_STA_NOTEXIST &&
2508 iwl_mvm_is_dqa_supported(mvm)) {
2509 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2510
2511 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2512 flush_work(&mvm->add_stream_wk);
2513
2514 /*
2515 * No need to make sure deferred TX indication is off since the
2516 * worker will already remove it if it was on
2517 */
2518 }
2519
2520 mutex_lock(&mvm->mutex);
2521 if (old_state == IEEE80211_STA_NOTEXIST &&
2522 new_state == IEEE80211_STA_NONE) {
2523 /*
2524 * Firmware bug - it'll crash if the beacon interval is less
2525 * than 16. We can't avoid connecting at all, so refuse the
2526 * station state change, this will cause mac80211 to abandon
2527 * attempts to connect to this AP, and eventually wpa_s will
2528 * blacklist the AP...
2529 */
2530 if (vif->type == NL80211_IFTYPE_STATION &&
2531 vif->bss_conf.beacon_int < 16) {
2532 IWL_ERR(mvm,
2533 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2534 sta->addr, vif->bss_conf.beacon_int);
2535 ret = -EINVAL;
2536 goto out_unlock;
2537 }
2538
2539 if (sta->tdls &&
2540 (vif->p2p ||
2541 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2542 IWL_MVM_TDLS_STA_COUNT ||
2543 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2544 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2545 ret = -EBUSY;
2546 goto out_unlock;
2547 }
2548
2549 ret = iwl_mvm_add_sta(mvm, vif, sta);
2550 if (sta->tdls && ret == 0) {
2551 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2552 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2553 NL80211_TDLS_SETUP);
2554 }
2555 } else if (old_state == IEEE80211_STA_NONE &&
2556 new_state == IEEE80211_STA_AUTH) {
2557 /*
2558 * EBS may be disabled due to previous failures reported by FW.
2559 * Reset EBS status here assuming environment has been changed.
2560 */
2561 mvm->last_ebs_successful = true;
2562 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2563 ret = 0;
2564 } else if (old_state == IEEE80211_STA_AUTH &&
2565 new_state == IEEE80211_STA_ASSOC) {
2566 if (vif->type == NL80211_IFTYPE_AP) {
2567 mvmvif->ap_assoc_sta_count++;
2568 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2569 }
2570 ret = iwl_mvm_update_sta(mvm, vif, sta);
2571 if (ret == 0)
2572 iwl_mvm_rs_rate_init(mvm, sta,
2573 mvmvif->phy_ctxt->channel->band,
2574 true);
2575 } else if (old_state == IEEE80211_STA_ASSOC &&
2576 new_state == IEEE80211_STA_AUTHORIZED) {
2577
2578 /* we don't support TDLS during DCM */
2579 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2580 iwl_mvm_teardown_tdls_peers(mvm);
2581
2582 if (sta->tdls)
2583 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2584 NL80211_TDLS_ENABLE_LINK);
2585
2586 /* enable beacon filtering */
2587 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2588 ret = 0;
2589 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2590 new_state == IEEE80211_STA_ASSOC) {
2591 /* disable beacon filtering */
2592 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2593 ret = 0;
2594 } else if (old_state == IEEE80211_STA_ASSOC &&
2595 new_state == IEEE80211_STA_AUTH) {
2596 if (vif->type == NL80211_IFTYPE_AP) {
2597 mvmvif->ap_assoc_sta_count--;
2598 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2599 }
2600 ret = 0;
2601 } else if (old_state == IEEE80211_STA_AUTH &&
2602 new_state == IEEE80211_STA_NONE) {
2603 ret = 0;
2604 } else if (old_state == IEEE80211_STA_NONE &&
2605 new_state == IEEE80211_STA_NOTEXIST) {
2606 ret = iwl_mvm_rm_sta(mvm, vif, sta);
2607 if (sta->tdls) {
2608 iwl_mvm_recalc_tdls_state(mvm, vif, false);
2609 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2610 NL80211_TDLS_DISABLE_LINK);
2611 }
2612 } else {
2613 ret = -EIO;
2614 }
2615 out_unlock:
2616 mutex_unlock(&mvm->mutex);
2617
2618 if (sta->tdls && ret == 0) {
2619 if (old_state == IEEE80211_STA_NOTEXIST &&
2620 new_state == IEEE80211_STA_NONE)
2621 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2622 else if (old_state == IEEE80211_STA_NONE &&
2623 new_state == IEEE80211_STA_NOTEXIST)
2624 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
2625 }
2626
2627 return ret;
2628 }
2629
2630 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2631 {
2632 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2633
2634 mvm->rts_threshold = value;
2635
2636 return 0;
2637 }
2638
2639 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
2640 struct ieee80211_vif *vif,
2641 struct ieee80211_sta *sta, u32 changed)
2642 {
2643 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2644
2645 if (vif->type == NL80211_IFTYPE_STATION &&
2646 changed & IEEE80211_RC_NSS_CHANGED)
2647 iwl_mvm_sf_update(mvm, vif, false);
2648 }
2649
2650 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
2651 struct ieee80211_vif *vif, u16 ac,
2652 const struct ieee80211_tx_queue_params *params)
2653 {
2654 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2655 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2656
2657 mvmvif->queue_params[ac] = *params;
2658
2659 /*
2660 * No need to update right away, we'll get BSS_CHANGED_QOS
2661 * The exception is P2P_DEVICE interface which needs immediate update.
2662 */
2663 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2664 int ret;
2665
2666 mutex_lock(&mvm->mutex);
2667 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2668 mutex_unlock(&mvm->mutex);
2669 return ret;
2670 }
2671 return 0;
2672 }
2673
2674 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
2675 struct ieee80211_vif *vif)
2676 {
2677 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2678 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
2679 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
2680
2681 if (WARN_ON_ONCE(vif->bss_conf.assoc))
2682 return;
2683
2684 /*
2685 * iwl_mvm_protect_session() reads directly from the device
2686 * (the system time), so make sure it is available.
2687 */
2688 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
2689 return;
2690
2691 mutex_lock(&mvm->mutex);
2692 /* Try really hard to protect the session and hear a beacon */
2693 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
2694 mutex_unlock(&mvm->mutex);
2695
2696 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
2697 }
2698
2699 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
2700 struct ieee80211_vif *vif,
2701 struct cfg80211_sched_scan_request *req,
2702 struct ieee80211_scan_ies *ies)
2703 {
2704 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2705
2706 int ret;
2707
2708 mutex_lock(&mvm->mutex);
2709
2710 if (!vif->bss_conf.idle) {
2711 ret = -EBUSY;
2712 goto out;
2713 }
2714
2715 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
2716
2717 out:
2718 mutex_unlock(&mvm->mutex);
2719 return ret;
2720 }
2721
2722 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
2723 struct ieee80211_vif *vif)
2724 {
2725 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2726 int ret;
2727
2728 mutex_lock(&mvm->mutex);
2729
2730 /* Due to a race condition, it's possible that mac80211 asks
2731 * us to stop a sched_scan when it's already stopped. This
2732 * can happen, for instance, if we stopped the scan ourselves,
2733 * called ieee80211_sched_scan_stopped() and the userspace called
2734 * stop sched scan before ieee80211_sched_scan_stopped_work()
2735 * could run. To handle this, simply return if the scan is
2736 * not running.
2737 */
2738 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
2739 mutex_unlock(&mvm->mutex);
2740 return 0;
2741 }
2742
2743 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
2744 mutex_unlock(&mvm->mutex);
2745 iwl_mvm_wait_for_async_handlers(mvm);
2746
2747 return ret;
2748 }
2749
2750 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
2751 enum set_key_cmd cmd,
2752 struct ieee80211_vif *vif,
2753 struct ieee80211_sta *sta,
2754 struct ieee80211_key_conf *key)
2755 {
2756 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2757 struct iwl_mvm_sta *mvmsta;
2758 struct iwl_mvm_key_pn *ptk_pn;
2759 int keyidx = key->keyidx;
2760 int ret;
2761 u8 key_offset;
2762
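/*
 * If software crypto was forced via module parameter, decline so that
 * mac80211 falls back to encrypting/decrypting in software.
 */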
2763 if (iwlwifi_mod_params.sw_crypto) {
2764 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
2765 return -EOPNOTSUPP;
2766 }
2767
2768 switch (key->cipher) {
2769 case WLAN_CIPHER_SUITE_TKIP:
2770 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2771 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2772 break;
2773 case WLAN_CIPHER_SUITE_CCMP:
2774 case WLAN_CIPHER_SUITE_GCMP:
2775 case WLAN_CIPHER_SUITE_GCMP_256:
2776 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2777 break;
2778 case WLAN_CIPHER_SUITE_AES_CMAC:
2779 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
2780 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
2781 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
2782 break;
2783 case WLAN_CIPHER_SUITE_WEP40:
2784 case WLAN_CIPHER_SUITE_WEP104:
2785 /* For non-client mode, only use WEP keys for TX as we probably
2786 * don't have a station yet anyway and would then have to keep
2787 * track of the keys, linking them to each of the clients/peers
2788 * as they appear. For now, don't do that, for performance WEP
2789 * offload doesn't really matter much, but we need it for some
2790 * other offload features in client mode.
2791 */
2792 if (vif->type != NL80211_IFTYPE_STATION)
2793 return 0;
2794 break;
2795 default:
2796 /* currently FW supports only one optional cipher scheme */
2797 if (hw->n_cipher_schemes &&
2798 hw->cipher_schemes->cipher == key->cipher)
2799 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
2800 else
2801 return -EOPNOTSUPP;
2802 }
2803
2804 mutex_lock(&mvm->mutex);
2805
2806 switch (cmd) {
2807 case SET_KEY:
2808 if ((vif->type == NL80211_IFTYPE_ADHOC ||
2809 vif->type == NL80211_IFTYPE_AP) && !sta) {
2810 /*
2811 * GTK on AP interface is a TX-only key, return 0;
2812 * on IBSS they're per-station and because we're lazy
2813 * we don't support them for RX, so do the same.
2814 * CMAC/GMAC in AP/IBSS modes must be done in software.
2815 */
2816 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
2817 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
2818 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
2819 ret = -EOPNOTSUPP;
2820 else
2821 ret = 0;
2822 key->hw_key_idx = STA_KEY_IDX_INVALID;
2823 break;
2824 }
2825
2826 /* During FW restart, in order to restore the state as it was,
2827 * don't try to reprogram keys we previously failed for.
2828 */
2829 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2830 key->hw_key_idx == STA_KEY_IDX_INVALID) {
2831 IWL_DEBUG_MAC80211(mvm,
2832 "skip invalid idx key programming during restart\n");
2833 ret = 0;
2834 break;
2835 }
2836
2837 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2838 sta && iwl_mvm_has_new_rx_api(mvm) &&
2839 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2840 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2841 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2842 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2843 struct ieee80211_key_seq seq;
2844 int tid, q;
2845
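/*
 * With the multi-queue RX API the PN replay check for this pairwise key
 * is done by the driver, so allocate per-TID PN storage for every RX
 * queue and seed it with the current RX sequence counters.
 */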
2846 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2847 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
2848 ptk_pn = kzalloc(sizeof(*ptk_pn) +
2849 mvm->trans->num_rx_queues *
2850 sizeof(ptk_pn->q[0]),
2851 GFP_KERNEL);
2852 if (!ptk_pn) {
2853 ret = -ENOMEM;
2854 break;
2855 }
2856
2857 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2858 ieee80211_get_key_rx_seq(key, tid, &seq);
2859 for (q = 0; q < mvm->trans->num_rx_queues; q++)
2860 memcpy(ptk_pn->q[q].pn[tid],
2861 seq.ccmp.pn,
2862 IEEE80211_CCMP_PN_LEN);
2863 }
2864
2865 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
2866 }
2867
2868 /* in HW restart reuse the index, otherwise request a new one */
2869 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2870 key_offset = key->hw_key_idx;
2871 else
2872 key_offset = STA_KEY_IDX_INVALID;
2873
2874 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
2875 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
2876 if (ret) {
2877 IWL_WARN(mvm, "set key failed\n");
2878 /*
2879 * can't add key for RX, but we don't need it
2880 * in the device for TX so still return 0
2881 */
2882 key->hw_key_idx = STA_KEY_IDX_INVALID;
2883 ret = 0;
2884 }
2885
2886 break;
2887 case DISABLE_KEY:
2888 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
2889 ret = 0;
2890 break;
2891 }
2892
2893 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
2894 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
2895 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
2896 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
2897 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
2898 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2899 ptk_pn = rcu_dereference_protected(
2900 mvmsta->ptk_pn[keyidx],
2901 lockdep_is_held(&mvm->mutex));
2902 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
2903 if (ptk_pn)
2904 kfree_rcu(ptk_pn, rcu_head);
2905 }
2906
2907 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
2908 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
2909 break;
2910 default:
2911 ret = -EINVAL;
2912 }
2913
2914 mutex_unlock(&mvm->mutex);
2915 return ret;
2916 }
2917
2918 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
2919 struct ieee80211_vif *vif,
2920 struct ieee80211_key_conf *keyconf,
2921 struct ieee80211_sta *sta,
2922 u32 iv32, u16 *phase1key)
2923 {
2924 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2925
2926 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
2927 return;
2928
2929 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
2930 }
2931
2932
2933 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
2934 struct iwl_rx_packet *pkt, void *data)
2935 {
2936 struct iwl_mvm *mvm =
2937 container_of(notif_wait, struct iwl_mvm, notif_wait);
2938 struct iwl_hs20_roc_res *resp;
2939 int resp_len = iwl_rx_packet_payload_len(pkt);
2940 struct iwl_mvm_time_event_data *te_data = data;
2941
2942 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
2943 return true;
2944
2945 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
2946 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
2947 return true;
2948 }
2949
2950 resp = (void *)pkt->data;
2951
2952 IWL_DEBUG_TE(mvm,
2953 "Aux ROC: Received response from ucode: status=%d uid=%d\n",
2954 resp->status, resp->event_unique_id);
2955
2956 te_data->uid = le32_to_cpu(resp->event_unique_id);
2957 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
2958 te_data->uid);
2959
2960 spin_lock_bh(&mvm->time_event_lock);
2961 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
2962 spin_unlock_bh(&mvm->time_event_lock);
2963
2964 return true;
2965 }
2966
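/* The AUX ROC timing constants below are expressed in TUs (1 TU = 1024 usec). */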
2967 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
2968 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
2969 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
2970 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
2971 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
2972 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2973 struct ieee80211_channel *channel,
2974 struct ieee80211_vif *vif,
2975 int duration)
2976 {
2977 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
2978 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2979 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
2980 static const u16 time_event_response[] = { HOT_SPOT_CMD };
2981 struct iwl_notification_wait wait_time_event;
2982 u32 dtim_interval = vif->bss_conf.dtim_period *
2983 vif->bss_conf.beacon_int;
2984 u32 req_dur, delay;
2985 struct iwl_hs20_roc_req aux_roc_req = {
2986 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
2987 .id_and_color =
2988 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
2989 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
2990 /* Set the channel info data */
2991 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
2992 PHY_BAND_24 : PHY_BAND_5,
2993 .channel_info.channel = channel->hw_value,
2994 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
2995 /* Set the time and duration */
2996 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
2997 };
2998
2999 delay = AUX_ROC_MIN_DELAY;
3000 req_dur = MSEC_TO_TU(duration);
3001
3002 /*
3003 * If we are associated we want the delay time to be at least one
3004 * dtim interval so that the FW can wait until after the DTIM and
3005 * then start the time event, this will potentially allow us to
3006 * remain off-channel for the max duration.
3007 * Since we want to use almost a whole dtim interval we would also
3008 * like the delay to be for 2-3 dtim intervals, in case there are
3009 * other time events with higher priority.
3010 */
3011 if (vif->bss_conf.assoc) {
3012 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
3013 /* We cannot remain off-channel longer than the DTIM interval */
3014 if (dtim_interval <= req_dur) {
3015 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
3016 if (req_dur <= AUX_ROC_MIN_DURATION)
3017 req_dur = dtim_interval -
3018 AUX_ROC_MIN_SAFETY_BUFFER;
3019 }
3020 }
3021
3022 aux_roc_req.duration = cpu_to_le32(req_dur);
3023 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
3024
3025 IWL_DEBUG_TE(mvm,
3026 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
3027 channel->hw_value, req_dur, duration, delay,
3028 dtim_interval);
3029 /* Set the node address */
3030 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3031
3032 lockdep_assert_held(&mvm->mutex);
3033
3034 spin_lock_bh(&mvm->time_event_lock);
3035
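/* Only one aux ROC can be pending: bail out if the hot spot time event data is already in use. */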
3036 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3037 spin_unlock_bh(&mvm->time_event_lock);
3038 return -EIO;
3039 }
3040
3041 te_data->vif = vif;
3042 te_data->duration = duration;
3043 te_data->id = HOT_SPOT_CMD;
3044
3045 spin_unlock_bh(&mvm->time_event_lock);
3046
3047 /*
3048 * Use a notification wait, which really just processes the
3049 * command response and doesn't wait for anything, in order
3050 * to be able to process the response and get the UID inside
3051 * the RX path. Using CMD_WANT_SKB doesn't work because it
3052 * stores the buffer and then wakes up this thread, by which
3053 * time another notification (that the time event started)
3054 * might already be processed unsuccessfully.
3055 */
3056 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3057 time_event_response,
3058 ARRAY_SIZE(time_event_response),
3059 iwl_mvm_rx_aux_roc, te_data);
3060
3061 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3062 &aux_roc_req);
3063
3064 if (res) {
3065 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3066 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3067 goto out_clear_te;
3068 }
3069
3070 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3071 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3072 /* should never fail */
3073 WARN_ON_ONCE(res);
3074
3075 if (res) {
3076 out_clear_te:
3077 spin_lock_bh(&mvm->time_event_lock);
3078 iwl_mvm_te_clear_data(mvm, te_data);
3079 spin_unlock_bh(&mvm->time_event_lock);
3080 }
3081
3082 return res;
3083 }
3084
3085 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3086 struct ieee80211_vif *vif,
3087 struct ieee80211_channel *channel,
3088 int duration,
3089 enum ieee80211_roc_type type)
3090 {
3091 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3092 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3093 struct cfg80211_chan_def chandef;
3094 struct iwl_mvm_phy_ctxt *phy_ctxt;
3095 int ret, i;
3096
3097 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3098 duration, type);
3099
3100 flush_work(&mvm->roc_done_wk);
3101
3102 mutex_lock(&mvm->mutex);
3103
3104 switch (vif->type) {
3105 case NL80211_IFTYPE_STATION:
3106 if (fw_has_capa(&mvm->fw->ucode_capa,
3107 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3108 /* Use aux roc framework (HS20) */
3109 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3110 vif, duration);
3111 goto out_unlock;
3112 }
3113 IWL_ERR(mvm, "hotspot not supported\n");
3114 ret = -EINVAL;
3115 goto out_unlock;
3116 case NL80211_IFTYPE_P2P_DEVICE:
3117 /* handle below */
3118 break;
3119 default:
3120 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3121 ret = -EINVAL;
3122 goto out_unlock;
3123 }
3124
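/*
 * Try to reuse a PHY context that is already tuned to the requested channel:
 * move the P2P Device binding over to it instead of retuning its own context.
 */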
3125 for (i = 0; i < NUM_PHY_CTX; i++) {
3126 phy_ctxt = &mvm->phy_ctxts[i];
3127 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3128 continue;
3129
3130 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3131 /*
3132 * Unbind the P2P_DEVICE from the current PHY context,
3133 * and if the PHY context is not used remove it.
3134 */
3135 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3136 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3137 goto out_unlock;
3138
3139 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3140
3141 /* Bind the P2P_DEVICE to the current PHY Context */
3142 mvmvif->phy_ctxt = phy_ctxt;
3143
3144 ret = iwl_mvm_binding_add_vif(mvm, vif);
3145 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3146 goto out_unlock;
3147
3148 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3149 goto schedule_time_event;
3150 }
3151 }
3152
3153 /* Need to update the PHY context only if the ROC channel changed */
3154 if (channel == mvmvif->phy_ctxt->channel)
3155 goto schedule_time_event;
3156
3157 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3158
3159 /*
3160 * Change the PHY context configuration as it is currently referenced
3161 * only by the P2P Device MAC
3162 */
3163 if (mvmvif->phy_ctxt->ref == 1) {
3164 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3165 &chandef, 1, 1);
3166 if (ret)
3167 goto out_unlock;
3168 } else {
3169 /*
3170 * The PHY context is shared with other MACs. Need to remove the
3171 * P2P Device from the binding, allocate a new PHY context and
3172 * create a new binding
3173 */
3174 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3175 if (!phy_ctxt) {
3176 ret = -ENOSPC;
3177 goto out_unlock;
3178 }
3179
3180 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3181 1, 1);
3182 if (ret) {
3183 IWL_ERR(mvm, "Failed to change PHY context\n");
3184 goto out_unlock;
3185 }
3186
3187 /* Unbind the P2P_DEVICE from the current PHY context */
3188 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3189 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3190 goto out_unlock;
3191
3192 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3193
3194 /* Bind the P2P_DEVICE to the new allocated PHY context */
3195 mvmvif->phy_ctxt = phy_ctxt;
3196
3197 ret = iwl_mvm_binding_add_vif(mvm, vif);
3198 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3199 goto out_unlock;
3200
3201 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3202 }
3203
3204 schedule_time_event:
3205 /* Schedule the time events */
3206 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3207
3208 out_unlock:
3209 mutex_unlock(&mvm->mutex);
3210 IWL_DEBUG_MAC80211(mvm, "leave\n");
3211 return ret;
3212 }
3213
3214 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3215 {
3216 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3217
3218 IWL_DEBUG_MAC80211(mvm, "enter\n");
3219
3220 mutex_lock(&mvm->mutex);
3221 iwl_mvm_stop_roc(mvm);
3222 mutex_unlock(&mvm->mutex);
3223
3224 IWL_DEBUG_MAC80211(mvm, "leave\n");
3225 return 0;
3226 }
3227
3228 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3229 struct ieee80211_chanctx_conf *ctx)
3230 {
3231 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3232 struct iwl_mvm_phy_ctxt *phy_ctxt;
3233 int ret;
3234
3235 lockdep_assert_held(&mvm->mutex);
3236
3237 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3238
3239 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3240 if (!phy_ctxt) {
3241 ret = -ENOSPC;
3242 goto out;
3243 }
3244
3245 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3246 ctx->rx_chains_static,
3247 ctx->rx_chains_dynamic);
3248 if (ret) {
3249 IWL_ERR(mvm, "Failed to add PHY context\n");
3250 goto out;
3251 }
3252
3253 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3254 *phy_ctxt_id = phy_ctxt->id;
3255 out:
3256 return ret;
3257 }
3258
3259 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3260 struct ieee80211_chanctx_conf *ctx)
3261 {
3262 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3263 int ret;
3264
3265 mutex_lock(&mvm->mutex);
3266 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3267 mutex_unlock(&mvm->mutex);
3268
3269 return ret;
3270 }
3271
3272 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3273 struct ieee80211_chanctx_conf *ctx)
3274 {
3275 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3276 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3277
3278 lockdep_assert_held(&mvm->mutex);
3279
3280 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3281 }
3282
3283 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3284 struct ieee80211_chanctx_conf *ctx)
3285 {
3286 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3287
3288 mutex_lock(&mvm->mutex);
3289 __iwl_mvm_remove_chanctx(mvm, ctx);
3290 mutex_unlock(&mvm->mutex);
3291 }
3292
3293 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3294 struct ieee80211_chanctx_conf *ctx,
3295 u32 changed)
3296 {
3297 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3298 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3299 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3300
3301 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3302 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3303 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3304 IEEE80211_CHANCTX_CHANGE_RADAR |
3305 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3306 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3307 phy_ctxt->ref, changed))
3308 return;
3309
3310 mutex_lock(&mvm->mutex);
3311 iwl_mvm_bt_coex_vif_change(mvm);
3312 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3313 ctx->rx_chains_static,
3314 ctx->rx_chains_dynamic);
3315 mutex_unlock(&mvm->mutex);
3316 }
3317
3318 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3319 struct ieee80211_vif *vif,
3320 struct ieee80211_chanctx_conf *ctx,
3321 bool switching_chanctx)
3322 {
3323 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3324 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3325 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3326 int ret;
3327
3328 lockdep_assert_held(&mvm->mutex);
3329
3330 mvmvif->phy_ctxt = phy_ctxt;
3331
3332 switch (vif->type) {
3333 case NL80211_IFTYPE_AP:
3334 /* only needed if we're switching chanctx (i.e. during CSA) */
3335 if (switching_chanctx) {
3336 mvmvif->ap_ibss_active = true;
3337 break;
3338 }
3339 case NL80211_IFTYPE_ADHOC:
3340 /*
3341 * The AP binding flow is handled as part of the start_ap flow
3342 * (in bss_info_changed), similarly for IBSS.
3343 */
3344 ret = 0;
3345 goto out;
3346 case NL80211_IFTYPE_STATION:
3347 break;
3348 case NL80211_IFTYPE_MONITOR:
3349 /* always disable PS when a monitor interface is active */
3350 mvmvif->ps_disabled = true;
3351 break;
3352 default:
3353 ret = -EINVAL;
3354 goto out;
3355 }
3356
3357 ret = iwl_mvm_binding_add_vif(mvm, vif);
3358 if (ret)
3359 goto out;
3360
3361 /*
3362 * Power state must be updated before quotas,
3363 * otherwise fw will complain.
3364 */
3365 iwl_mvm_power_update_mac(mvm);
3366
3367 /* Setting the quota at this stage is only required for monitor
3368 * interfaces. For the other types, the bss_info changed flow
3369 * will handle quota settings.
3370 */
3371 if (vif->type == NL80211_IFTYPE_MONITOR) {
3372 mvmvif->monitor_active = true;
3373 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3374 if (ret)
3375 goto out_remove_binding;
3376
3377 ret = iwl_mvm_add_snif_sta(mvm, vif);
3378 if (ret)
3379 goto out_remove_binding;
3380
3381 }
3382
3383 /* Handle binding during CSA */
3384 if (vif->type == NL80211_IFTYPE_AP) {
3385 iwl_mvm_update_quotas(mvm, false, NULL);
3386 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3387 }
3388
3389 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3390 u32 duration = 2 * vif->bss_conf.beacon_int;
3391
3392 /* iwl_mvm_protect_session() reads directly from the
3393 * device (the system time), so make sure it is
3394 * available.
3395 */
3396 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3397 if (ret)
3398 goto out_remove_binding;
3399
3400 /* Protect the session to make sure we hear the first
3401 * beacon on the new channel.
3402 */
3403 iwl_mvm_protect_session(mvm, vif, duration, duration,
3404 vif->bss_conf.beacon_int / 2,
3405 true);
3406
3407 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3408
3409 iwl_mvm_update_quotas(mvm, false, NULL);
3410 }
3411
3412 goto out;
3413
3414 out_remove_binding:
3415 iwl_mvm_binding_remove_vif(mvm, vif);
3416 iwl_mvm_power_update_mac(mvm);
3417 out:
3418 if (ret)
3419 mvmvif->phy_ctxt = NULL;
3420 return ret;
3421 }
3422 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3423 struct ieee80211_vif *vif,
3424 struct ieee80211_chanctx_conf *ctx)
3425 {
3426 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3427 int ret;
3428
3429 mutex_lock(&mvm->mutex);
3430 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3431 mutex_unlock(&mvm->mutex);
3432
3433 return ret;
3434 }
3435
3436 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3437 struct ieee80211_vif *vif,
3438 struct ieee80211_chanctx_conf *ctx,
3439 bool switching_chanctx)
3440 {
3441 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3442 struct ieee80211_vif *disabled_vif = NULL;
3443
3444 lockdep_assert_held(&mvm->mutex);
3445
3446 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3447
3448 switch (vif->type) {
3449 case NL80211_IFTYPE_ADHOC:
3450 goto out;
3451 case NL80211_IFTYPE_MONITOR:
3452 mvmvif->monitor_active = false;
3453 mvmvif->ps_disabled = false;
3454 iwl_mvm_rm_snif_sta(mvm, vif);
3455 break;
3456 case NL80211_IFTYPE_AP:
3457 /* This part is triggered only during CSA */
3458 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3459 goto out;
3460
3461 mvmvif->csa_countdown = false;
3462
3463 /* Set CS bit on all the stations */
3464 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3465
3466 /* Save blocked iface, the timeout is set on the next beacon */
3467 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3468
3469 mvmvif->ap_ibss_active = false;
3470 break;
3471 case NL80211_IFTYPE_STATION:
3472 if (!switching_chanctx)
3473 break;
3474
3475 disabled_vif = vif;
3476
3477 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3478 break;
3479 default:
3480 break;
3481 }
3482
3483 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3484 iwl_mvm_binding_remove_vif(mvm, vif);
3485
3486 out:
3487 mvmvif->phy_ctxt = NULL;
3488 iwl_mvm_power_update_mac(mvm);
3489 }
3490
iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_chanctx_conf * ctx)3491 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3492 struct ieee80211_vif *vif,
3493 struct ieee80211_chanctx_conf *ctx)
3494 {
3495 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3496
3497 mutex_lock(&mvm->mutex);
3498 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3499 mutex_unlock(&mvm->mutex);
3500 }
3501
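/*
 * Swap-style channel context switch: unbind the vif from the old context,
 * remove it, then add the new context and bind the vif to it.  On failure,
 * fall back to the old context; if even that fails, restart the hw.
 */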
static int
iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
				struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);

	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
	if (ret) {
		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
		goto out_reassign;
	}

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_remove;
	}

	/* we don't support TDLS during DCM - can be caused by channel switch */
	if (iwl_mvm_phy_ctx_count(mvm) > 1)
		iwl_mvm_teardown_tdls_peers(mvm);

	goto out;

out_remove:
	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);

out_reassign:
	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
		goto out_restart;
	}

	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}

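/*
 * Reassign-style channel context switch: both contexts already exist, so
 * only move the vif from the old context to the new one, falling back to
 * the old context (or restarting the hw) on failure.
 */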
static int
iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
				    struct ieee80211_vif_chanctx_switch *vifs)
{
	int ret;

	mutex_lock(&mvm->mutex);
	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);

	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
					   true);
	if (ret) {
		IWL_ERR(mvm,
			"failed to assign new_ctx during channel switch\n");
		goto out_reassign;
	}

	goto out;

out_reassign:
	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
					 true)) {
		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
		goto out_restart;
	}

	goto out;

out_restart:
	/* things keep failing, better restart the hw */
	iwl_mvm_nic_restart(mvm, false);

out:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
				      struct ieee80211_vif_chanctx_switch *vifs,
				      int n_vifs,
				      enum ieee80211_chanctx_switch_mode mode)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	/* we only support a single vif right now */
	if (n_vifs > 1)
		return -EOPNOTSUPP;

	switch (mode) {
	case CHANCTX_SWMODE_SWAP_CONTEXTS:
		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
		break;
	case CHANCTX_SWMODE_REASSIGN_VIF:
		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

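/*
 * The TIM bitmap lives in the beacon, so when mac80211 asks us to update a
 * station's TIM bit we simply upload a fresh beacon template for the vif
 * that station belongs to.
 */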
static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
			   struct ieee80211_sta *sta,
			   bool set)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (!mvm_sta || !mvm_sta->vif) {
		IWL_ERR(mvm, "Station is not associated to a vif\n");
		return -EINVAL;
	}

	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
}

#ifdef CONFIG_NL80211_TESTMODE
static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};

static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
				      struct ieee80211_vif *vif,
				      void *data, int len)
{
	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
	int err;
	u32 noa_duration;

	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy);
	if (err)
		return err;

	if (!tb[IWL_MVM_TM_ATTR_CMD])
		return -EINVAL;

	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
	case IWL_MVM_TM_CMD_SET_NOA:
		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
		    !vif->bss_conf.enable_beacon ||
		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
			return -EINVAL;

		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
		if (noa_duration >= vif->bss_conf.beacon_int)
			return -EINVAL;

		mvm->noa_duration = noa_duration;
		mvm->noa_vif = vif;

		return iwl_mvm_update_quotas(mvm, false, NULL);
	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
		/* must be associated client vif - ignore authorized */
		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
			return -EINVAL;

		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	}

	return -EOPNOTSUPP;
}

static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    void *data, int len)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int err;

	mutex_lock(&mvm->mutex);
	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
	mutex_unlock(&mvm->mutex);

	return err;
}
#endif

static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_channel_switch *chsw)
{
	/* By implementing this operation, we prevent mac80211 from
	 * starting its own channel switch timer, so that we can call
	 * ieee80211_chswitch_done() ourselves at the right time
	 * (which is when the absence time event starts).
	 */

	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
			   "dummy channel switch op\n");
}

static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_channel_switch *chsw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_vif *csa_vif;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u32 apply_time;
	int ret;

	mutex_lock(&mvm->mutex);

	mvmvif->csa_failed = false;

	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
			   chsw->chandef.center_freq1);

	iwl_fw_dbg_trigger_simple_stop(mvm, vif, FW_DBG_TRIGGER_CHANNEL_SWITCH);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		csa_vif =
			rcu_dereference_protected(mvm->csa_vif,
						  lockdep_is_held(&mvm->mutex));
		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
			      "Another CSA is already in progress")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		/* we still didn't unblock tx. prevent new CS meanwhile */
		if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
					      lockdep_is_held(&mvm->mutex))) {
			ret = -EBUSY;
			goto out_unlock;
		}

		rcu_assign_pointer(mvm->csa_vif, vif);

		if (WARN_ONCE(mvmvif->csa_countdown,
			      "Previous CSA countdown didn't complete")) {
			ret = -EBUSY;
			goto out_unlock;
		}

		mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;

		break;
	case NL80211_IFTYPE_STATION:
		if (mvmvif->lqm_active)
			iwl_mvm_send_lqm_cmd(vif,
					     LQM_CMD_OPERATION_STOP_MEASUREMENT,
					     0, 0);

		/* Schedule the time event to a bit before beacon 1,
		 * to make sure we're in the new channel when the
		 * GO/AP arrives.
		 */
		apply_time = chsw->device_timestamp +
			((vif->bss_conf.beacon_int * (chsw->count - 1) -
			  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);

		if (chsw->block_tx)
			iwl_mvm_csa_client_absent(mvm, vif);

		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
					    apply_time);
		if (mvmvif->bf_data.bf_enabled) {
			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
			if (ret)
				goto out_unlock;
		}

		break;
	default:
		break;
	}

	mvmvif->ps_disabled = true;

	ret = iwl_mvm_power_update_ps(mvm);
	if (ret)
		goto out_unlock;

	/* we won't be on this channel any longer */
	iwl_mvm_teardown_tdls_peers(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	mutex_lock(&mvm->mutex);

	if (mvmvif->csa_failed) {
		mvmvif->csa_failed = false;
		ret = -EIO;
		goto out_unlock;
	}

	if (vif->type == NL80211_IFTYPE_STATION) {
		struct iwl_mvm_sta *mvmsta;

		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
							  mvmvif->ap_sta_id);

		if (WARN_ON(!mvmsta)) {
			ret = -EIO;
			goto out_unlock;
		}

		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);

		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);

		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
		if (ret)
			goto out_unlock;

		iwl_mvm_stop_session_protection(mvm, vif);
	}

	mvmvif->ps_disabled = false;

	ret = iwl_mvm_power_update_ps(mvm);

out_unlock:
	mutex_unlock(&mvm->mutex);

	return ret;
}

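/*
 * Flush the frames pending for this vif: collect the TX queues of its AP
 * station and TDLS peers, then either flush them in the fw (drop) or wait
 * for the queues to drain.
 */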
static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, u32 queues, bool drop)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif;
	struct iwl_mvm_sta *mvmsta;
	struct ieee80211_sta *sta;
	int i;
	u32 msk = 0;

	if (!vif || vif->type != NL80211_IFTYPE_STATION)
		return;

	/* Make sure we're done with the deferred traffic before flushing */
	if (iwl_mvm_is_dqa_supported(mvm))
		flush_work(&mvm->add_stream_wk);

	mutex_lock(&mvm->mutex);
	mvmvif = iwl_mvm_vif_from_mac80211(vif);

	/* flush the AP-station and all TDLS peers */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);
		if (mvmsta->vif != vif)
			continue;

		/* make sure only TDLS peers or the AP are flushed */
		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);

		msk |= mvmsta->tfd_queue_msk;
	}

	if (drop) {
		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
			IWL_ERR(mvm, "flush request fail\n");
		mutex_unlock(&mvm->mutex);
	} else {
		mutex_unlock(&mvm->mutex);

		/* this can take a while, and we may need/want other operations
		 * to succeed while doing this, so do it without the mutex held
		 */
		iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
	}
}

static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	memset(survey, 0, sizeof(*survey));

	/* only support global statistics right now */
	if (idx != 0)
		return -ENOENT;

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return -ENOENT;

	mutex_lock(&mvm->mutex);

	if (mvm->ucode_loaded) {
		ret = iwl_mvm_request_statistics(mvm, false);
		if (ret)
			goto out;
	}

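	/* the radio statistics are accumulated in usec; survey times are in msec */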
	survey->filled = SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_RX |
			 SURVEY_INFO_TIME_TX |
			 SURVEY_INFO_TIME_SCAN;
	survey->time = mvm->accu_radio_stats.on_time_rf +
		       mvm->radio_stats.on_time_rf;
	do_div(survey->time, USEC_PER_MSEC);

	survey->time_rx = mvm->accu_radio_stats.rx_time +
			  mvm->radio_stats.rx_time;
	do_div(survey->time_rx, USEC_PER_MSEC);

	survey->time_tx = mvm->accu_radio_stats.tx_time +
			  mvm->radio_stats.tx_time;
	do_div(survey->time_tx, USEC_PER_MSEC);

	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
			    mvm->radio_stats.on_time_scan;
	do_div(survey->time_scan, USEC_PER_MSEC);

	ret = 0;
out:
	mutex_unlock(&mvm->mutex);
	return ret;
}

static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct station_info *sinfo)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

	if (mvmsta->avg_energy) {
		sinfo->signal_avg = mvmsta->avg_energy;
		sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG);
	}

	if (!fw_has_capa(&mvm->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
		return;

	/* if beacon filtering isn't on, mac80211 does it anyway */
	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
		return;

	if (!vif->bss_conf.assoc)
		return;

	mutex_lock(&mvm->mutex);

	if (mvmvif->ap_sta_id != mvmsta->sta_id)
		goto unlock;

	if (iwl_mvm_request_statistics(mvm, false))
		goto unlock;

	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
			   mvmvif->beacon_stats.accu_num_beacons;
	sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX);
	if (mvmvif->beacon_stats.avg_signal) {
		/* firmware only reports a value after RXing a few beacons */
		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
		sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
	}
unlock:
	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif,
					const struct ieee80211_event *event)
{
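/*
 * Collect firmware debug data for an MLME event: a non-zero stop counter
 * fires the trigger only when it counts down to zero, while a zero counter
 * fires it on every matching event.
 */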
#define CHECK_MLME_TRIGGER(_mvm, _trig, _buf, _cnt, _fmt...)	\
	do {							\
		if ((_cnt) && --(_cnt))				\
			break;					\
		iwl_mvm_fw_dbg_collect_trig(_mvm, _trig, _fmt);	\
	} while (0)

	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_mlme *trig_mlme;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
	trig_mlme = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (event->u.mlme.data == ASSOC_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_assoc_denied,
					   "DENIED ASSOC: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_assoc_timeout,
					   "ASSOC TIMEOUT");
	} else if (event->u.mlme.data == AUTH_EVENT) {
		if (event->u.mlme.status == MLME_DENIED)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_auth_denied,
					   "DENIED AUTH: reason %d",
					   event->u.mlme.reason);
		else if (event->u.mlme.status == MLME_TIMEOUT)
			CHECK_MLME_TRIGGER(mvm, trig, buf,
					   trig_mlme->stop_auth_timeout,
					   "AUTH TIMEOUT");
	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
		CHECK_MLME_TRIGGER(mvm, trig, buf,
				   trig_mlme->stop_rx_deauth,
				   "DEAUTH RX %d", event->u.mlme.reason);
	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
		CHECK_MLME_TRIGGER(mvm, trig, buf,
				   trig_mlme->stop_tx_deauth,
				   "DEAUTH TX %d", event->u.mlme.reason);
	}
#undef CHECK_MLME_TRIGGER
}

static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif,
					  const struct ieee80211_event *event)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "BAR received from %pM, tid %d, ssn %d",
				    event->u.ba.sta->addr, event->u.ba.tid,
				    event->u.ba.ssn);
}

static void
iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
				     struct ieee80211_vif *vif,
				     const struct ieee80211_event *event)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
		return;

	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
	ba_trig = (void *)trig->data;
	if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
		return;

	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(event->u.ba.tid)))
		return;

	iwl_mvm_fw_dbg_collect_trig(mvm, trig,
				    "Frame from %pM timed out, tid %d",
				    event->u.ba.sta->addr, event->u.ba.tid);
}

static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       const struct ieee80211_event *event)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	switch (event->type) {
	case MLME_EVENT:
		iwl_mvm_event_mlme_callback(mvm, vif, event);
		break;
	case BAR_RX_EVENT:
		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
		break;
	case BA_FRAME_TIMEOUT:
		iwl_mvm_event_frame_timeout_callback(mvm, vif, event);
		break;
	default:
		break;
	}
}

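/*
 * Notify all RX queues (and, when sync is set, wait up to a second for each
 * of them to process the notification) so that nothing is left in flight on
 * any queue before we proceed.
 */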
void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
				     struct iwl_mvm_internal_rxq_notif *notif,
				     u32 size)
{
	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_rx_api(mvm))
		return;

	notif->cookie = mvm->queue_sync_cookie;

	if (notif->sync)
		atomic_set(&mvm->queue_sync_counter,
			   mvm->trans->num_rx_queues);

	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
	if (ret) {
		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
		goto out;
	}

	if (notif->sync)
		ret = wait_event_timeout(mvm->rx_sync_waitq,
					 atomic_read(&mvm->queue_sync_counter) == 0,
					 HZ);
	/* only warn about a timed-out wait when we actually waited */
	WARN_ON_ONCE(notif->sync && !ret);

out:
	atomic_set(&mvm->queue_sync_counter, 0);
	mvm->queue_sync_cookie++;
}

static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_internal_rxq_notif data = {
		.type = IWL_MVM_RXQ_EMPTY,
		.sync = 1,
	};

	mutex_lock(&mvm->mutex);
	iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
	mutex_unlock(&mvm->mutex);
}

const struct ieee80211_ops iwl_mvm_hw_ops = {
	.tx = iwl_mvm_mac_tx,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.start = iwl_mvm_mac_start,
	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
	.stop = iwl_mvm_mac_stop,
	.add_interface = iwl_mvm_mac_add_interface,
	.remove_interface = iwl_mvm_mac_remove_interface,
	.config = iwl_mvm_mac_config,
	.prepare_multicast = iwl_mvm_prepare_multicast,
	.configure_filter = iwl_mvm_configure_filter,
	.config_iface_filter = iwl_mvm_config_iface_filter,
	.bss_info_changed = iwl_mvm_bss_info_changed,
	.hw_scan = iwl_mvm_mac_hw_scan,
	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
	.sta_state = iwl_mvm_mac_sta_state,
	.sta_notify = iwl_mvm_mac_sta_notify,
	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
	.sta_rc_update = iwl_mvm_sta_rc_update,
	.conf_tx = iwl_mvm_mac_conf_tx,
	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
	.flush = iwl_mvm_mac_flush,
	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
	.set_key = iwl_mvm_mac_set_key,
	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
	.remain_on_channel = iwl_mvm_roc,
	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
	.add_chanctx = iwl_mvm_add_chanctx,
	.remove_chanctx = iwl_mvm_remove_chanctx,
	.change_chanctx = iwl_mvm_change_chanctx,
	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,

	.start_ap = iwl_mvm_start_ap_ibss,
	.stop_ap = iwl_mvm_stop_ap_ibss,
	.join_ibss = iwl_mvm_start_ap_ibss,
	.leave_ibss = iwl_mvm_stop_ap_ibss,

	.set_tim = iwl_mvm_set_tim,

	.channel_switch = iwl_mvm_channel_switch,
	.pre_channel_switch = iwl_mvm_pre_channel_switch,
	.post_channel_switch = iwl_mvm_post_channel_switch,

	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,

	.event_callback = iwl_mvm_mac_event_callback,

	.sync_rx_queues = iwl_mvm_sync_rx_queues,

	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)

#ifdef CONFIG_PM_SLEEP
	/* look at d3.c */
	.suspend = iwl_mvm_suspend,
	.resume = iwl_mvm_resume,
	.set_wakeup = iwl_mvm_set_wakeup,
	.set_rekey_data = iwl_mvm_set_rekey_data,
#if IS_ENABLED(CONFIG_IPV6)
	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
#endif
	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
#endif
	.get_survey = iwl_mvm_mac_get_survey,
	.sta_statistics = iwl_mvm_mac_sta_statistics,
};