1 /******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
25 * USA
26 *
27 * The full GNU General Public License is included in this distribution
28 * in the file called COPYING.
29 *
30 * Contact Information:
31 * Intel Linux Wireless <linuxwifi@intel.com>
32 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
33 *
34 * BSD LICENSE
35 *
36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
40 * All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 *
46 * * Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * * Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in
50 * the documentation and/or other materials provided with the
51 * distribution.
52 * * Neither the name Intel Corporation nor the names of its
53 * contributors may be used to endorse or promote products derived
54 * from this software without specific prior written permission.
55 *
56 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
57 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
58 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
59 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
60 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
62 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
63 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
64 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
65 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
66 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 *
68 *****************************************************************************/
69 #include <linux/kernel.h>
70 #include <linux/slab.h>
71 #include <linux/skbuff.h>
72 #include <linux/netdevice.h>
73 #include <linux/etherdevice.h>
74 #include <linux/ip.h>
75 #include <linux/if_arp.h>
76 #include <linux/time.h>
77 #include <net/mac80211.h>
78 #include <net/ieee80211_radiotap.h>
79 #include <net/tcp.h>
80
81 #include "iwl-op-mode.h"
82 #include "iwl-io.h"
83 #include "mvm.h"
84 #include "sta.h"
85 #include "time-event.h"
86 #include "iwl-eeprom-parse.h"
87 #include "iwl-phy-db.h"
88 #include "testmode.h"
89 #include "fw/error-dump.h"
90 #include "iwl-prph.h"
91 #include "iwl-nvm-parse.h"
92
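/*
 * Advertised interface concurrency: at most one managed station, one
 * AP/P2P-client/P2P-GO and one P2P device, spread over up to two
 * different channels (see iwl_mvm_iface_combinations below).
 */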
93 static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
94 {
95 .max = 1,
96 .types = BIT(NL80211_IFTYPE_STATION),
97 },
98 {
99 .max = 1,
100 .types = BIT(NL80211_IFTYPE_AP) |
101 BIT(NL80211_IFTYPE_P2P_CLIENT) |
102 BIT(NL80211_IFTYPE_P2P_GO),
103 },
104 {
105 .max = 1,
106 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
107 },
108 };
109
110 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
111 {
112 .num_different_channels = 2,
113 .max_interfaces = 3,
114 .limits = iwl_mvm_limits,
115 .n_limits = ARRAY_SIZE(iwl_mvm_limits),
116 },
117 };
118
119 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
120 /*
121 * Use the reserved field to indicate magic values.
122 * These values are only used internally by the driver
123 * and never make it to the fw (reserved will be 0).
124 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
125 * be the vif's IP address. If there is not exactly one
126 * IP address (0, or more than 1), this attribute will
127 * be skipped.
128 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
129 * the LSB bytes of the vif's MAC address.
130 */
131 enum {
132 BC_FILTER_MAGIC_NONE = 0,
133 BC_FILTER_MAGIC_IP,
134 BC_FILTER_MAGIC_MAC,
135 };
136
137 static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
138 {
139 /* arp */
140 .discard = 0,
141 .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
142 .attrs = {
143 {
144 /* frame type - arp, hw type - ethernet */
145 .offset_type =
146 BCAST_FILTER_OFFSET_PAYLOAD_START,
147 .offset = sizeof(rfc1042_header),
148 .val = cpu_to_be32(0x08060001),
149 .mask = cpu_to_be32(0xffffffff),
150 },
151 {
152 /* arp dest ip */
153 .offset_type =
154 BCAST_FILTER_OFFSET_PAYLOAD_START,
155 .offset = sizeof(rfc1042_header) + 2 +
156 sizeof(struct arphdr) +
157 ETH_ALEN + sizeof(__be32) +
158 ETH_ALEN,
159 .mask = cpu_to_be32(0xffffffff),
160 /* mark it as special field */
161 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
162 },
163 },
164 },
165 {
166 /* dhcp offer bcast */
167 .discard = 0,
168 .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
169 .attrs = {
170 {
171 /* udp dest port - 68 (bootp client) */
172 .offset_type = BCAST_FILTER_OFFSET_IP_END,
173 .offset = offsetof(struct udphdr, dest),
174 .val = cpu_to_be32(0x00440000),
175 .mask = cpu_to_be32(0xffff0000),
176 },
177 {
178 /* dhcp - lsb bytes of client hw address */
179 .offset_type = BCAST_FILTER_OFFSET_IP_END,
180 .offset = 38,
181 .mask = cpu_to_be32(0xffffffff),
182 /* mark it as special field */
183 .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
184 },
185 },
186 },
187 /* last filter must be empty */
188 {},
189 };
190 #endif
191
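/*
 * D0i3 (runtime-PM) reference tracking: each IWL_MVM_REF_* type keeps its
 * own counter under refs_lock, and every increment/decrement is mirrored
 * by a transport-layer reference so the device cannot runtime-suspend
 * while any reference is still held.
 */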
192 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
193 {
194 if (!iwl_mvm_is_d0i3_supported(mvm))
195 return;
196
197 IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
198 spin_lock_bh(&mvm->refs_lock);
199 mvm->refs[ref_type]++;
200 spin_unlock_bh(&mvm->refs_lock);
201 iwl_trans_ref(mvm->trans);
202 }
203
204 void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
205 {
206 if (!iwl_mvm_is_d0i3_supported(mvm))
207 return;
208
209 IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
210 spin_lock_bh(&mvm->refs_lock);
211 if (WARN_ON(!mvm->refs[ref_type])) {
212 spin_unlock_bh(&mvm->refs_lock);
213 return;
214 }
215 mvm->refs[ref_type]--;
216 spin_unlock_bh(&mvm->refs_lock);
217 iwl_trans_unref(mvm->trans);
218 }
219
220 static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
221 enum iwl_mvm_ref_type except_ref)
222 {
223 int i, j;
224
225 if (!iwl_mvm_is_d0i3_supported(mvm))
226 return;
227
228 spin_lock_bh(&mvm->refs_lock);
229 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
230 if (except_ref == i || !mvm->refs[i])
231 continue;
232
233 IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
234 i, mvm->refs[i]);
235 for (j = 0; j < mvm->refs[i]; j++)
236 iwl_trans_unref(mvm->trans);
237 mvm->refs[i] = 0;
238 }
239 spin_unlock_bh(&mvm->refs_lock);
240 }
241
242 bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
243 {
244 int i;
245 bool taken = false;
246
247 if (!iwl_mvm_is_d0i3_supported(mvm))
248 return true;
249
250 spin_lock_bh(&mvm->refs_lock);
251 for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
252 if (mvm->refs[i]) {
253 taken = true;
254 break;
255 }
256 }
257 spin_unlock_bh(&mvm->refs_lock);
258
259 return taken;
260 }
261
262 int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
263 {
264 iwl_mvm_ref(mvm, ref_type);
265
266 if (!wait_event_timeout(mvm->d0i3_exit_waitq,
267 !test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
268 HZ)) {
269 WARN_ON_ONCE(1);
270 iwl_mvm_unref(mvm, ref_type);
271 return -EIO;
272 }
273
274 return 0;
275 }
276
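/* Reset all PHY contexts to their default state: fixed ids, zero refcount. */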
277 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
278 {
279 int i;
280
281 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
282 for (i = 0; i < NUM_PHY_CTX; i++) {
283 mvm->phy_ctxts[i].id = i;
284 mvm->phy_ctxts[i].ref = 0;
285 }
286 }
287
288 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
289 const char *alpha2,
290 enum iwl_mcc_source src_id,
291 bool *changed)
292 {
293 struct ieee80211_regdomain *regd = NULL;
294 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
295 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
296 struct iwl_mcc_update_resp *resp;
297
298 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
299
300 lockdep_assert_held(&mvm->mutex);
301
302 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
303 if (IS_ERR_OR_NULL(resp)) {
304 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
305 PTR_ERR_OR_ZERO(resp));
306 goto out;
307 }
308
309 if (changed) {
310 u32 status = le32_to_cpu(resp->status);
311
312 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
313 status == MCC_RESP_ILLEGAL);
314 }
315
316 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
317 __le32_to_cpu(resp->n_channels),
318 resp->channels,
319 __le16_to_cpu(resp->mcc),
320 __le16_to_cpu(resp->geo_info),
321 __le16_to_cpu(resp->cap));
322 /* Store the returned source id */
323 src_id = resp->source_id;
324 kfree(resp);
325 if (IS_ERR_OR_NULL(regd)) {
326 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
327 PTR_ERR_OR_ZERO(regd));
328 goto out;
329 }
330
331 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
332 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
333 mvm->lar_regdom_set = true;
334 mvm->mcc_src = src_id;
335
336 out:
337 return regd;
338 }
339
340 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
341 {
342 bool changed;
343 struct ieee80211_regdomain *regd;
344
345 if (!iwl_mvm_is_lar_supported(mvm))
346 return;
347
348 regd = iwl_mvm_get_current_regdomain(mvm, &changed);
349 if (!IS_ERR_OR_NULL(regd)) {
350 /* only update the regulatory core if changed */
351 if (changed)
352 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
353
354 kfree(regd);
355 }
356 }
357
358 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
359 bool *changed)
360 {
361 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
362 iwl_mvm_is_wifi_mcc_supported(mvm) ?
363 MCC_SOURCE_GET_CURRENT :
364 MCC_SOURCE_OLD_FW, changed);
365 }
366
367 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
368 {
369 enum iwl_mcc_source used_src;
370 struct ieee80211_regdomain *regd;
371 int ret;
372 bool changed;
373 const struct ieee80211_regdomain *r =
374 rtnl_dereference(mvm->hw->wiphy->regd);
375
376 if (!r)
377 return -ENOENT;
378
379 /* save the last source in case we overwrite it below */
380 used_src = mvm->mcc_src;
381 if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
382 /* Notify the firmware we support wifi location updates */
383 regd = iwl_mvm_get_current_regdomain(mvm, NULL);
384 if (!IS_ERR_OR_NULL(regd))
385 kfree(regd);
386 }
387
388 /* Now set our last stored MCC and source */
389 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
390 &changed);
391 if (IS_ERR_OR_NULL(regd))
392 return -EIO;
393
394 /* update cfg80211 if the regdomain was changed */
395 if (changed)
396 ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
397 else
398 ret = 0;
399
400 kfree(regd);
401 return ret;
402 }
403
404 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
405 {
406 struct ieee80211_hw *hw = mvm->hw;
407 int num_mac, ret, i;
408 static const u32 mvm_ciphers[] = {
409 WLAN_CIPHER_SUITE_WEP40,
410 WLAN_CIPHER_SUITE_WEP104,
411 WLAN_CIPHER_SUITE_TKIP,
412 WLAN_CIPHER_SUITE_CCMP,
413 };
414
415 /* Tell mac80211 our characteristics */
416 ieee80211_hw_set(hw, SIGNAL_DBM);
417 ieee80211_hw_set(hw, SPECTRUM_MGMT);
418 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
419 ieee80211_hw_set(hw, QUEUE_CONTROL);
420 ieee80211_hw_set(hw, WANT_MONITOR_VIF);
421 ieee80211_hw_set(hw, SUPPORTS_PS);
422 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
423 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
424 ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
425 ieee80211_hw_set(hw, CONNECTION_MONITOR);
426 ieee80211_hw_set(hw, CHANCTX_STA_CSA);
427 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
428 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
429 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
430 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
431 ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
432
433 if (iwl_mvm_has_tlc_offload(mvm)) {
434 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
435 ieee80211_hw_set(hw, HAS_RATE_CONTROL);
436 }
437
438 if (iwl_mvm_has_new_rx_api(mvm))
439 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
440
441 if (fw_has_capa(&mvm->fw->ucode_capa,
442 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
443 ieee80211_hw_set(hw, AP_LINK_PS);
444 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
445 /*
446 * we absolutely need this for the new TX API since that comes
447 * with many more queues than the current code can deal with
448 * for station powersave
449 */
450 return -EINVAL;
451 }
452
453 if (mvm->trans->num_rx_queues > 1)
454 ieee80211_hw_set(hw, USES_RSS);
455
456 if (mvm->trans->max_skb_frags)
457 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
458
459 hw->queues = IEEE80211_MAX_QUEUES;
460 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
461 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
462 IEEE80211_RADIOTAP_MCS_HAVE_STBC;
463 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
464 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
465
466 hw->radiotap_timestamp.units_pos =
467 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
468 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
469 /* this is the accuracy for CCK frames; it's better (only 8) for OFDM */
470 hw->radiotap_timestamp.accuracy = 22;
471
472 if (!iwl_mvm_has_tlc_offload(mvm))
473 hw->rate_control_algorithm = RS_NAME;
474
475 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
476 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
477
478 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
479 memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
480 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
481 hw->wiphy->cipher_suites = mvm->ciphers;
482
483 if (iwl_mvm_has_new_rx_api(mvm)) {
484 mvm->ciphers[hw->wiphy->n_cipher_suites] =
485 WLAN_CIPHER_SUITE_GCMP;
486 hw->wiphy->n_cipher_suites++;
487 mvm->ciphers[hw->wiphy->n_cipher_suites] =
488 WLAN_CIPHER_SUITE_GCMP_256;
489 hw->wiphy->n_cipher_suites++;
490 }
491
492 /* Enable 11w if software crypto is not enabled (as the
493 * firmware will interpret some mgmt packets, so enabling it
494 * with software crypto isn't safe).
495 */
496 if (!iwlwifi_mod_params.swcrypto) {
497 ieee80211_hw_set(hw, MFP_CAPABLE);
498 mvm->ciphers[hw->wiphy->n_cipher_suites] =
499 WLAN_CIPHER_SUITE_AES_CMAC;
500 hw->wiphy->n_cipher_suites++;
501 if (iwl_mvm_has_new_rx_api(mvm)) {
502 mvm->ciphers[hw->wiphy->n_cipher_suites] =
503 WLAN_CIPHER_SUITE_BIP_GMAC_128;
504 hw->wiphy->n_cipher_suites++;
505 mvm->ciphers[hw->wiphy->n_cipher_suites] =
506 WLAN_CIPHER_SUITE_BIP_GMAC_256;
507 hw->wiphy->n_cipher_suites++;
508 }
509 }
510
511 /* currently FW API supports only one optional cipher scheme */
512 if (mvm->fw->cs[0].cipher) {
513 const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
514 struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
515
516 mvm->hw->n_cipher_schemes = 1;
517
518 cs->cipher = le32_to_cpu(fwcs->cipher);
519 cs->iftype = BIT(NL80211_IFTYPE_STATION);
520 cs->hdr_len = fwcs->hdr_len;
521 cs->pn_len = fwcs->pn_len;
522 cs->pn_off = fwcs->pn_off;
523 cs->key_idx_off = fwcs->key_idx_off;
524 cs->key_idx_mask = fwcs->key_idx_mask;
525 cs->key_idx_shift = fwcs->key_idx_shift;
526 cs->mic_len = fwcs->mic_len;
527
528 mvm->hw->cipher_schemes = mvm->cs;
529 mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
530 hw->wiphy->n_cipher_suites++;
531 }
532
533 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
534 hw->wiphy->features |=
535 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
536 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
537 NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
538
539 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
540 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
541 hw->chanctx_data_size = sizeof(u16);
542
543 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
544 BIT(NL80211_IFTYPE_P2P_CLIENT) |
545 BIT(NL80211_IFTYPE_AP) |
546 BIT(NL80211_IFTYPE_P2P_GO) |
547 BIT(NL80211_IFTYPE_P2P_DEVICE) |
548 BIT(NL80211_IFTYPE_ADHOC);
549
550 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
551 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
552 if (iwl_mvm_is_lar_supported(mvm))
553 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
554 else
555 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
556 REGULATORY_DISABLE_BEACON_HINTS;
557
558 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
559 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
560
561 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
562 hw->wiphy->n_iface_combinations =
563 ARRAY_SIZE(iwl_mvm_iface_combinations);
564
565 hw->wiphy->max_remain_on_channel_duration = 10000;
566 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
567 /* we can compensate an offset of up to 3 channels = 15 MHz */
568 hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
569
570 /* Extract MAC address */
571 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
572 hw->wiphy->addresses = mvm->addresses;
573 hw->wiphy->n_addresses = 1;
574
575 /* Extract additional MAC addresses if available */
576 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
577 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
578
579 for (i = 1; i < num_mac; i++) {
580 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
581 ETH_ALEN);
582 mvm->addresses[i].addr[5]++;
583 hw->wiphy->n_addresses++;
584 }
585
586 iwl_mvm_reset_phy_ctxts(mvm);
587
588 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
589
590 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
591
592 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
593 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
594 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
595
596 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
597 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
598 else
599 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
600
601 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
602 hw->wiphy->bands[NL80211_BAND_2GHZ] =
603 &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
604 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
605 hw->wiphy->bands[NL80211_BAND_5GHZ] =
606 &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
607
608 if (fw_has_capa(&mvm->fw->ucode_capa,
609 IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
610 fw_has_api(&mvm->fw->ucode_capa,
611 IWL_UCODE_TLV_API_LQ_SS_PARAMS))
612 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
613 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
614 }
615
616 hw->wiphy->hw_version = mvm->trans->hw_id;
617
618 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
619 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
620 else
621 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
622
623 hw->wiphy->max_sched_scan_reqs = 1;
624 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
625 hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
626 /* we create the 802.11 header and a zero-length SSID IE. */
627 hw->wiphy->max_sched_scan_ie_len =
628 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
629 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
630 hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
631
632 /*
633 * the firmware uses a u8 for the number of iterations, but 0xff is
634 * reserved for an infinite loop, so the maximum is actually 254.
635 */
636 hw->wiphy->max_sched_scan_plan_iterations = 254;
637
638 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
639 NL80211_FEATURE_LOW_PRIORITY_SCAN |
640 NL80211_FEATURE_P2P_GO_OPPPS |
641 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
642 NL80211_FEATURE_DYNAMIC_SMPS |
643 NL80211_FEATURE_STATIC_SMPS |
644 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
645
646 if (fw_has_capa(&mvm->fw->ucode_capa,
647 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
648 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
649 if (fw_has_capa(&mvm->fw->ucode_capa,
650 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
651 hw->wiphy->features |= NL80211_FEATURE_QUIET;
652
653 if (fw_has_capa(&mvm->fw->ucode_capa,
654 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
655 hw->wiphy->features |=
656 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
657
658 if (fw_has_capa(&mvm->fw->ucode_capa,
659 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
660 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
661
662 if (fw_has_api(&mvm->fw->ucode_capa,
663 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
664 wiphy_ext_feature_set(hw->wiphy,
665 NL80211_EXT_FEATURE_SCAN_START_TIME);
666 wiphy_ext_feature_set(hw->wiphy,
667 NL80211_EXT_FEATURE_BSS_PARENT_TSF);
668 wiphy_ext_feature_set(hw->wiphy,
669 NL80211_EXT_FEATURE_SET_SCAN_DWELL);
670 }
671
672 if (iwl_mvm_is_oce_supported(mvm)) {
673 wiphy_ext_feature_set(hw->wiphy,
674 NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
675 wiphy_ext_feature_set(hw->wiphy,
676 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
677 wiphy_ext_feature_set(hw->wiphy,
678 NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
679 wiphy_ext_feature_set(hw->wiphy,
680 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
681 }
682
683 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
684
685 #ifdef CONFIG_PM_SLEEP
686 if (iwl_mvm_is_d0i3_supported(mvm) &&
687 device_can_wakeup(mvm->trans->dev)) {
688 mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
689 hw->wiphy->wowlan = &mvm->wowlan;
690 }
691
692 if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
693 mvm->trans->ops->d3_suspend &&
694 mvm->trans->ops->d3_resume &&
695 device_can_wakeup(mvm->trans->dev)) {
696 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
697 WIPHY_WOWLAN_DISCONNECT |
698 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
699 WIPHY_WOWLAN_RFKILL_RELEASE |
700 WIPHY_WOWLAN_NET_DETECT;
701 if (!iwlwifi_mod_params.swcrypto)
702 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
703 WIPHY_WOWLAN_GTK_REKEY_FAILURE |
704 WIPHY_WOWLAN_4WAY_HANDSHAKE;
705
706 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
707 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
708 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
709 mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
710 hw->wiphy->wowlan = &mvm->wowlan;
711 }
712 #endif
713
714 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
715 /* assign default bcast filtering configuration */
716 mvm->bcast_filters = iwl_mvm_default_bcast_filters;
717 #endif
718
719 ret = iwl_mvm_leds_init(mvm);
720 if (ret)
721 return ret;
722
723 if (fw_has_capa(&mvm->fw->ucode_capa,
724 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
725 IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
726 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
727 ieee80211_hw_set(hw, TDLS_WIDER_BW);
728 }
729
730 if (fw_has_capa(&mvm->fw->ucode_capa,
731 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
732 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
733 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
734 }
735
736 hw->netdev_features |= mvm->cfg->features;
737 if (!iwl_mvm_is_csum_supported(mvm)) {
738 hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
739 NETIF_F_RXCSUM);
740 /* We may support SW TX CSUM */
741 if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
742 hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
743 }
744
745 ret = ieee80211_register_hw(mvm->hw);
746 if (ret)
747 iwl_mvm_leds_exit(mvm);
748 mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
749
750 if (mvm->cfg->vht_mu_mimo_supported)
751 wiphy_ext_feature_set(hw->wiphy,
752 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
753
754 return ret;
755 }
756
757 static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
758 struct ieee80211_sta *sta,
759 struct sk_buff *skb)
760 {
761 struct iwl_mvm_sta *mvmsta;
762 bool defer = false;
763
764 /*
765 * double check the IN_D0I3 flag both before and after
766 * taking the spinlock, in order to prevent taking
767 * the spinlock when not needed.
768 */
769 if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
770 return false;
771
772 spin_lock(&mvm->d0i3_tx_lock);
773 /*
774 * testing the flag again ensures the skb dequeue
775 * loop (on d0i3 exit) hasn't run yet.
776 */
777 if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
778 goto out;
779
780 mvmsta = iwl_mvm_sta_from_mac80211(sta);
781 if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
782 mvmsta->sta_id != mvm->d0i3_ap_sta_id)
783 goto out;
784
785 __skb_queue_tail(&mvm->d0i3_tx, skb);
786 ieee80211_stop_queues(mvm->hw);
787
788 /* trigger wakeup */
789 iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
790 iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
791
792 defer = true;
793 out:
794 spin_unlock(&mvm->d0i3_tx_lock);
795 return defer;
796 }
797
798 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
799 struct ieee80211_tx_control *control,
800 struct sk_buff *skb)
801 {
802 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
803 struct ieee80211_sta *sta = control->sta;
804 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
805 struct ieee80211_hdr *hdr = (void *)skb->data;
806
807 if (iwl_mvm_is_radio_killed(mvm)) {
808 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
809 goto drop;
810 }
811
812 if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
813 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
814 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
815 goto drop;
816
817 /* treat non-bufferable MMPDUs on AP interfaces as broadcast */
818 if ((info->control.vif->type == NL80211_IFTYPE_AP ||
819 info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
820 ieee80211_is_mgmt(hdr->frame_control) &&
821 !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
822 sta = NULL;
823
824 /* If there is no sta, and it's not offchannel - send through AP */
825 if (info->control.vif->type == NL80211_IFTYPE_STATION &&
826 info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
827 struct iwl_mvm_vif *mvmvif =
828 iwl_mvm_vif_from_mac80211(info->control.vif);
829 u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
830
831 if (ap_sta_id < IWL_MVM_STATION_COUNT) {
832 /* mac80211 holds rcu read lock */
833 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
834 if (IS_ERR_OR_NULL(sta))
835 goto drop;
836 }
837 }
838
839 if (sta) {
840 if (iwl_mvm_defer_tx(mvm, sta, skb))
841 return;
842 if (iwl_mvm_tx_skb(mvm, skb, sta))
843 goto drop;
844 return;
845 }
846
847 if (iwl_mvm_tx_skb_non_sta(mvm, skb))
848 goto drop;
849 return;
850 drop:
851 ieee80211_free_txskb(hw, skb);
852 }
853
854 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
855 {
856 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
857 return false;
858 return true;
859 }
860
861 static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
862 {
863 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
864 return false;
865 if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
866 return true;
867
868 /* enabled by default */
869 return true;
870 }
871
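/*
 * Fire the block-ack debug trigger only if the trigger's TID bitmap
 * includes the given TID; otherwise skip the firmware dump collection.
 */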
872 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) \
873 do { \
874 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \
875 break; \
876 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \
877 } while (0)
878
879 static void
880 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
881 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
882 enum ieee80211_ampdu_mlme_action action)
883 {
884 struct iwl_fw_dbg_trigger_tlv *trig;
885 struct iwl_fw_dbg_trigger_ba *ba_trig;
886
887 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
888 return;
889
890 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
891 ba_trig = (void *)trig->data;
892
893 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
894 ieee80211_vif_to_wdev(vif), trig))
895 return;
896
897 switch (action) {
898 case IEEE80211_AMPDU_TX_OPERATIONAL: {
899 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
900 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
901
902 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
903 "TX AGG START: MAC %pM tid %d ssn %d\n",
904 sta->addr, tid, tid_data->ssn);
905 break;
906 }
907 case IEEE80211_AMPDU_TX_STOP_CONT:
908 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
909 "TX AGG STOP: MAC %pM tid %d\n",
910 sta->addr, tid);
911 break;
912 case IEEE80211_AMPDU_RX_START:
913 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
914 "RX AGG START: MAC %pM tid %d ssn %d\n",
915 sta->addr, tid, rx_ba_ssn);
916 break;
917 case IEEE80211_AMPDU_RX_STOP:
918 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
919 "RX AGG STOP: MAC %pM tid %d\n",
920 sta->addr, tid);
921 break;
922 default:
923 break;
924 }
925 }
926
927 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
928 struct ieee80211_vif *vif,
929 struct ieee80211_ampdu_params *params)
930 {
931 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
932 int ret;
933 bool tx_agg_ref = false;
934 struct ieee80211_sta *sta = params->sta;
935 enum ieee80211_ampdu_mlme_action action = params->action;
936 u16 tid = params->tid;
937 u16 *ssn = ¶ms->ssn;
938 u16 buf_size = params->buf_size;
939 bool amsdu = params->amsdu;
940 u16 timeout = params->timeout;
941
942 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
943 sta->addr, tid, action);
944
945 if (!(mvm->nvm_data->sku_cap_11n_enable))
946 return -EACCES;
947
948 /* return from D0i3 before starting a new Tx aggregation */
949 switch (action) {
950 case IEEE80211_AMPDU_TX_START:
951 case IEEE80211_AMPDU_TX_STOP_CONT:
952 case IEEE80211_AMPDU_TX_STOP_FLUSH:
953 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
954 case IEEE80211_AMPDU_TX_OPERATIONAL:
955 /*
956 * for tx start, wait synchronously until D0i3 exit to
957 * get the correct sequence number for the tid.
958 * additionally, some other ampdu actions use direct
959 * target access, which is not handled automatically
960 * by the trans layer (unlike commands), so wait for
961 * d0i3 exit in these cases as well.
962 */
963 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
964 if (ret)
965 return ret;
966
967 tx_agg_ref = true;
968 break;
969 default:
970 break;
971 }
972
973 mutex_lock(&mvm->mutex);
974
975 switch (action) {
976 case IEEE80211_AMPDU_RX_START:
977 if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
978 iwl_mvm_sta_from_mac80211(sta)->sta_id) {
979 struct iwl_mvm_vif *mvmvif;
980 u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
981 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
982
983 mdata->opened_rx_ba_sessions = true;
984 mvmvif = iwl_mvm_vif_from_mac80211(vif);
985 cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
986 }
987 if (!iwl_enable_rx_ampdu(mvm->cfg)) {
988 ret = -EINVAL;
989 break;
990 }
991 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
992 timeout);
993 break;
994 case IEEE80211_AMPDU_RX_STOP:
995 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
996 timeout);
997 break;
998 case IEEE80211_AMPDU_TX_START:
999 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
1000 ret = -EINVAL;
1001 break;
1002 }
1003 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
1004 break;
1005 case IEEE80211_AMPDU_TX_STOP_CONT:
1006 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
1007 break;
1008 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1009 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1010 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
1011 break;
1012 case IEEE80211_AMPDU_TX_OPERATIONAL:
1013 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
1014 buf_size, amsdu);
1015 break;
1016 default:
1017 WARN_ON_ONCE(1);
1018 ret = -EINVAL;
1019 break;
1020 }
1021
1022 if (!ret) {
1023 u16 rx_ba_ssn = 0;
1024
1025 if (action == IEEE80211_AMPDU_RX_START)
1026 rx_ba_ssn = *ssn;
1027
1028 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
1029 rx_ba_ssn, action);
1030 }
1031 mutex_unlock(&mvm->mutex);
1032
1033 /*
1034 * If the tid is marked as started, we won't use it for offloaded
1035 * traffic on the next D0i3 entry. It's safe to unref.
1036 */
1037 if (tx_agg_ref)
1038 iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
1039
1040 return ret;
1041 }
1042
1043 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
1044 struct ieee80211_vif *vif)
1045 {
1046 struct iwl_mvm *mvm = data;
1047 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1048
1049 mvmvif->uploaded = false;
1050 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1051
1052 spin_lock_bh(&mvm->time_event_lock);
1053 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
1054 spin_unlock_bh(&mvm->time_event_lock);
1055
1056 mvmvif->phy_ctxt = NULL;
1057 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
1058 }
1059
1060 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
1061 {
1062 /* clear the D3 reconfig flag; we only need it to avoid dumping a
1063 * firmware coredump on reconfiguration, and we shouldn't do that
1064 * on a D3->D0 transition
1065 */
1066 if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
1067 mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
1068 iwl_fw_error_dump(&mvm->fwrt);
1069 }
1070
1071 /* cleanup all stale references (scan, roc), but keep the
1072 * ucode_down ref until reconfig is complete
1073 */
1074 iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
1075
1076 iwl_mvm_stop_device(mvm);
1077
1078 mvm->scan_status = 0;
1079 mvm->ps_disabled = false;
1080 mvm->calibrating = false;
1081
1082 /* just in case one was running */
1083 iwl_mvm_cleanup_roc_te(mvm);
1084 ieee80211_remain_on_channel_expired(mvm->hw);
1085
1086 /*
1087 * cleanup all interfaces, even inactive ones, as some might have
1088 * gone down during the HW restart
1089 */
1090 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
1091
1092 mvm->p2p_device_vif = NULL;
1093 mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
1094
1095 iwl_mvm_reset_phy_ctxts(mvm);
1096 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
1097 memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
1098 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
1099 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
1100
1101 ieee80211_wake_queues(mvm->hw);
1102
1103 /* clear any stale d0i3 state */
1104 clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
1105
1106 mvm->vif_count = 0;
1107 mvm->rx_ba_sessions = 0;
1108 mvm->fwrt.dump.conf = FW_DBG_INVALID;
1109 mvm->monitor_on = false;
1110
1111 /* keep statistics ticking */
1112 iwl_mvm_accu_radio_stats(mvm);
1113 }
1114
1115 int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
1116 {
1117 int ret;
1118
1119 lockdep_assert_held(&mvm->mutex);
1120
1121 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
1122 /*
1123 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
1124 * so later code will - from now on - see that we're doing it.
1125 */
1126 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1127 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
1128 /* Clean up some internal and mac80211 state on restart */
1129 iwl_mvm_restart_cleanup(mvm);
1130 } else {
1131 /* Hold the reference to prevent runtime suspend while
1132 * the start procedure runs. It's a bit confusing
1133 * that the UCODE_DOWN reference is taken, but it just
1134 * means "UCODE is not UP yet". (TODO: rename this
1135 * reference).
1136 */
1137 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
1138 }
1139 ret = iwl_mvm_up(mvm);
1140
1141 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1142 /* Something went wrong - we need to finish some cleanup
1143 * that normally iwl_mvm_mac_restart_complete() below
1144 * would do.
1145 */
1146 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1147 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1148 }
1149
1150 return ret;
1151 }
1152
1153 static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
1154 {
1155 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1156 int ret;
1157
1158 /* Some hw restart cleanups must not hold the mutex */
1159 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1160 /*
1161 * Make sure we are out of d0i3. This is needed
1162 * to make sure the reference accounting is correct
1163 * (and there is no stale d0i3_exit_work).
1164 */
1165 wait_event_timeout(mvm->d0i3_exit_waitq,
1166 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1167 &mvm->status),
1168 HZ);
1169 }
1170
1171 mutex_lock(&mvm->mutex);
1172 ret = __iwl_mvm_mac_start(mvm);
1173 mutex_unlock(&mvm->mutex);
1174
1175 return ret;
1176 }
1177
1178 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
1179 {
1180 int ret;
1181
1182 mutex_lock(&mvm->mutex);
1183
1184 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
1185 iwl_mvm_d0i3_enable_tx(mvm, NULL);
1186 ret = iwl_mvm_update_quotas(mvm, true, NULL);
1187 if (ret)
1188 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
1189 ret);
1190
1191 /* allow transport/FW low power modes */
1192 iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1193
1194 /*
1195 * If we have TDLS peers, remove them. We don't know the last seqno/PN
1196 * of packets the FW sent out, so we must reconnect.
1197 */
1198 iwl_mvm_teardown_tdls_peers(mvm);
1199
1200 mutex_unlock(&mvm->mutex);
1201 }
1202
1203 static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
1204 {
1205 if (iwl_mvm_is_d0i3_supported(mvm) &&
1206 iwl_mvm_enter_d0i3_on_suspend(mvm))
1207 WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
1208 !test_bit(IWL_MVM_STATUS_IN_D0I3,
1209 &mvm->status),
1210 HZ),
1211 "D0i3 exit on resume timed out\n");
1212 }
1213
1214 static void
1215 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
1216 enum ieee80211_reconfig_type reconfig_type)
1217 {
1218 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1219
1220 switch (reconfig_type) {
1221 case IEEE80211_RECONFIG_TYPE_RESTART:
1222 iwl_mvm_restart_complete(mvm);
1223 break;
1224 case IEEE80211_RECONFIG_TYPE_SUSPEND:
1225 iwl_mvm_resume_complete(mvm);
1226 break;
1227 }
1228 }
1229
1230 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
1231 {
1232 lockdep_assert_held(&mvm->mutex);
1233
1234 /* firmware counters are obviously reset now, but we shouldn't keep a
1235 * partial accumulation, so also clear the fw_reset_accu counters.
1236 */
1237 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
1238
1239 /* async_handlers_wk is now blocked */
1240
1241 /*
1242 * The work item could be running or queued if the
1243 * ROC time event stops just as we get here.
1244 */
1245 flush_work(&mvm->roc_done_wk);
1246
1247 iwl_mvm_stop_device(mvm);
1248
1249 iwl_mvm_async_handlers_purge(mvm);
1250 /* async_handlers_list is empty and will stay empty: HW is stopped */
1251
1252 /* the fw is stopped, the aux sta is dead: clean up driver state */
1253 iwl_mvm_del_aux_sta(mvm);
1254
1255 /*
1256 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
1257 * hw (as restart_complete() won't be called in this case) and mac80211
1258 * won't execute the restart.
1259 * But make sure to cleanup interfaces that have gone down before/during
1260 * HW restart was requested.
1261 */
1262 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
1263 test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
1264 &mvm->status))
1265 ieee80211_iterate_interfaces(mvm->hw, 0,
1266 iwl_mvm_cleanup_iterator, mvm);
1267
1268 /* We shouldn't have any UIDs still set. Loop over all the UIDs to
1269 * make sure there's nothing left there and warn if any is found.
1270 */
1271 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1272 int i;
1273
1274 for (i = 0; i < mvm->max_scans; i++) {
1275 if (WARN_ONCE(mvm->scan_uid_status[i],
1276 "UMAC scan UID %d status was not cleaned\n",
1277 i))
1278 mvm->scan_uid_status[i] = 0;
1279 }
1280 }
1281 }
1282
1283 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
1284 {
1285 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1286
1287 flush_work(&mvm->d0i3_exit_work);
1288 flush_work(&mvm->async_handlers_wk);
1289 flush_work(&mvm->add_stream_wk);
1290
1291 /*
1292 * Lock and clear the firmware running bit here already, so that
1293 * new commands coming in elsewhere, e.g. from debugfs, will not
1294 * be able to proceed. This is important here because one of those
1295 * debugfs files causes the firmware dump to be triggered, and if we
1296 * don't stop debugfs accesses before canceling that it could be
1297 * retriggered after we flush it but before we've cleared the bit.
1298 */
1299 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1300
1301 iwl_fw_cancel_dump(&mvm->fwrt);
1302 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
1303 cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
1304 iwl_fw_free_dump_desc(&mvm->fwrt);
1305
1306 mutex_lock(&mvm->mutex);
1307 __iwl_mvm_mac_stop(mvm);
1308 mutex_unlock(&mvm->mutex);
1309
1310 /*
1311 * The worker might have been waiting for the mutex, let it run and
1312 * discover that its list is now empty.
1313 */
1314 cancel_work_sync(&mvm->async_handlers_wk);
1315 }
1316
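/* Return the first unreferenced PHY context, or NULL if all NUM_PHY_CTX are in use. */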
1317 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
1318 {
1319 u16 i;
1320
1321 lockdep_assert_held(&mvm->mutex);
1322
1323 for (i = 0; i < NUM_PHY_CTX; i++)
1324 if (!mvm->phy_ctxts[i].ref)
1325 return &mvm->phy_ctxts[i];
1326
1327 IWL_ERR(mvm, "No available PHY context\n");
1328 return NULL;
1329 }
1330
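/*
 * Send a per-MAC TX power restriction to the firmware. The limit is
 * encoded in 1/8 dBm steps (hence the multiplication by 8), and
 * IWL_DEV_MAX_TX_POWER is used when no user limit is requested.
 */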
1331 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1332 s16 tx_power)
1333 {
1334 struct iwl_dev_tx_power_cmd cmd = {
1335 .v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
1336 .v3.mac_context_id =
1337 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
1338 .v3.pwr_restriction = cpu_to_le16(8 * tx_power),
1339 };
1340 int len = sizeof(cmd);
1341
1342 if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
1343 cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
1344
1345 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1346 len = sizeof(cmd.v3);
1347
1348 return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1349 }
1350
1351 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
1352 struct ieee80211_vif *vif)
1353 {
1354 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1355 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1356 int ret;
1357
1358 mvmvif->mvm = mvm;
1359
1360 /*
1361 * make sure D0i3 exit is completed, otherwise a target access
1362 * during tx queue configuration could be done when still in
1363 * D0i3 state.
1364 */
1365 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
1366 if (ret)
1367 return ret;
1368
1369 /*
1370 * Not much to do here. The stack will not allow interface
1371 * types or combinations that we didn't advertise, so we
1372 * don't really have to check the types.
1373 */
1374
1375 mutex_lock(&mvm->mutex);
1376
1377 /* make sure that beacon statistics don't go backwards with FW reset */
1378 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1379 mvmvif->beacon_stats.accu_num_beacons +=
1380 mvmvif->beacon_stats.num_beacons;
1381
1382 /* Allocate resources for the MAC context, and add it to the fw */
1383 ret = iwl_mvm_mac_ctxt_init(mvm, vif);
1384 if (ret)
1385 goto out_unlock;
1386
1387 /* Counting number of interfaces is needed for legacy PM */
1388 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1389 mvm->vif_count++;
1390
1391 /*
1392 * The AP binding flow can be done only after the beacon
1393 * template is configured (which happens only in the mac80211
1394 * start_ap() flow), and adding the broadcast station can happen
1395 * only after the binding.
1396 * In addition, since modifying the MAC before adding a bcast
1397 * station is not allowed by the FW, delay the adding of MAC context to
1398 * the point where we can also add the bcast station.
1399 * In short: there's not much we can do at this point, other than
1400 * allocating resources :)
1401 */
1402 if (vif->type == NL80211_IFTYPE_AP ||
1403 vif->type == NL80211_IFTYPE_ADHOC) {
1404 ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
1405 if (ret) {
1406 IWL_ERR(mvm, "Failed to allocate bcast sta\n");
1407 goto out_release;
1408 }
1409
1410 /*
1411 * The only queue for this station is the mcast queue,
1412 * which shouldn't be in TFD mask anyway
1413 */
1414 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
1415 0, vif->type,
1416 IWL_STA_MULTICAST);
1417 if (ret)
1418 goto out_release;
1419
1420 iwl_mvm_vif_dbgfs_register(mvm, vif);
1421 goto out_unlock;
1422 }
1423
1424 mvmvif->features |= hw->netdev_features;
1425
1426 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
1427 if (ret)
1428 goto out_release;
1429
1430 ret = iwl_mvm_power_update_mac(mvm);
1431 if (ret)
1432 goto out_remove_mac;
1433
1434 /* beacon filtering */
1435 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
1436 if (ret)
1437 goto out_remove_mac;
1438
1439 if (!mvm->bf_allowed_vif &&
1440 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
1441 mvm->bf_allowed_vif = mvmvif;
1442 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
1443 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
1444 }
1445
1446 /*
1447 * P2P_DEVICE interface does not have a channel context assigned to it,
1448 * so a dedicated PHY context is allocated to it and the corresponding
1449 * MAC context is bound to it at this stage.
1450 */
1451 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1452
1453 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
1454 if (!mvmvif->phy_ctxt) {
1455 ret = -ENOSPC;
1456 goto out_free_bf;
1457 }
1458
1459 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
1460 ret = iwl_mvm_binding_add_vif(mvm, vif);
1461 if (ret)
1462 goto out_unref_phy;
1463
1464 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
1465 if (ret)
1466 goto out_unbind;
1467
1468 /* Save a pointer to p2p device vif, so it can later be used to
1469 * update the p2p device MAC when a GO is started/stopped */
1470 mvm->p2p_device_vif = vif;
1471 }
1472
1473 iwl_mvm_tcm_add_vif(mvm, vif);
1474
1475 if (vif->type == NL80211_IFTYPE_MONITOR)
1476 mvm->monitor_on = true;
1477
1478 iwl_mvm_vif_dbgfs_register(mvm, vif);
1479 goto out_unlock;
1480
1481 out_unbind:
1482 iwl_mvm_binding_remove_vif(mvm, vif);
1483 out_unref_phy:
1484 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1485 out_free_bf:
1486 if (mvm->bf_allowed_vif == mvmvif) {
1487 mvm->bf_allowed_vif = NULL;
1488 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1489 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1490 }
1491 out_remove_mac:
1492 mvmvif->phy_ctxt = NULL;
1493 iwl_mvm_mac_ctxt_remove(mvm, vif);
1494 out_release:
1495 if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
1496 mvm->vif_count--;
1497 out_unlock:
1498 mutex_unlock(&mvm->mutex);
1499
1500 iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
1501
1502 return ret;
1503 }
1504
1505 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
1506 struct ieee80211_vif *vif)
1507 {
1508 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1509 /*
1510 * Flush the ROC worker which will flush the OFFCHANNEL queue.
1511 * We assume here that all the packets sent to the OFFCHANNEL
1512 * queue are sent in a ROC session.
1513 */
1514 flush_work(&mvm->roc_done_wk);
1515 }
1516 }
1517
1518 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
1519 struct ieee80211_vif *vif)
1520 {
1521 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1522 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1523
1524 iwl_mvm_prepare_mac_removal(mvm, vif);
1525
1526 if (!(vif->type == NL80211_IFTYPE_AP ||
1527 vif->type == NL80211_IFTYPE_ADHOC))
1528 iwl_mvm_tcm_rm_vif(mvm, vif);
1529
1530 mutex_lock(&mvm->mutex);
1531
1532 if (mvm->bf_allowed_vif == mvmvif) {
1533 mvm->bf_allowed_vif = NULL;
1534 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
1535 IEEE80211_VIF_SUPPORTS_CQM_RSSI);
1536 }
1537
1538 iwl_mvm_vif_dbgfs_clean(mvm, vif);
1539
1540 /*
1541 * For AP/GO interfaces, the tear-down of the resources allocated to the
1542 * interface is handled as part of the stop_ap flow.
1543 */
1544 if (vif->type == NL80211_IFTYPE_AP ||
1545 vif->type == NL80211_IFTYPE_ADHOC) {
1546 #ifdef CONFIG_NL80211_TESTMODE
1547 if (vif == mvm->noa_vif) {
1548 mvm->noa_vif = NULL;
1549 mvm->noa_duration = 0;
1550 }
1551 #endif
1552 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
1553 iwl_mvm_dealloc_bcast_sta(mvm, vif);
1554 goto out_release;
1555 }
1556
1557 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
1558 mvm->p2p_device_vif = NULL;
1559 iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
1560 iwl_mvm_binding_remove_vif(mvm, vif);
1561 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
1562 mvmvif->phy_ctxt = NULL;
1563 }
1564
1565 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
1566 mvm->vif_count--;
1567
1568 iwl_mvm_power_update_mac(mvm);
1569 iwl_mvm_mac_ctxt_remove(mvm, vif);
1570
1571 if (vif->type == NL80211_IFTYPE_MONITOR)
1572 mvm->monitor_on = false;
1573
1574 out_release:
1575 mutex_unlock(&mvm->mutex);
1576 }
1577
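/* No-op: this driver doesn't use the legacy hw config path. */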
1578 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
1579 {
1580 return 0;
1581 }
1582
1583 struct iwl_mvm_mc_iter_data {
1584 struct iwl_mvm *mvm;
1585 int port_id;
1586 };
1587
1588 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
1589 struct ieee80211_vif *vif)
1590 {
1591 struct iwl_mvm_mc_iter_data *data = _data;
1592 struct iwl_mvm *mvm = data->mvm;
1593 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
1594 struct iwl_host_cmd hcmd = {
1595 .id = MCAST_FILTER_CMD,
1596 .flags = CMD_ASYNC,
1597 .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1598 };
1599 int ret, len;
1600
1601 /* if we don't have free ports, mcast frames will be dropped */
1602 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
1603 return;
1604
1605 if (vif->type != NL80211_IFTYPE_STATION ||
1606 !vif->bss_conf.assoc)
1607 return;
1608
1609 cmd->port_id = data->port_id++;
1610 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
1611 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
1612
1613 hcmd.len[0] = len;
1614 hcmd.data[0] = cmd;
1615
1616 ret = iwl_mvm_send_cmd(mvm, &hcmd);
1617 if (ret)
1618 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
1619 }
1620
1621 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
1622 {
1623 struct iwl_mvm_mc_iter_data iter_data = {
1624 .mvm = mvm,
1625 };
1626
1627 lockdep_assert_held(&mvm->mutex);
1628
1629 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
1630 return;
1631
1632 ieee80211_iterate_active_interfaces_atomic(
1633 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1634 iwl_mvm_mc_iface_iterator, &iter_data);
1635 }
1636
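/*
 * Build the MCAST_FILTER_CMD payload in atomic context and hand it back
 * to mac80211 as an opaque u64 cookie; iwl_mvm_configure_filter() later
 * takes ownership of (and eventually frees) this allocation.
 */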
1637 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
1638 struct netdev_hw_addr_list *mc_list)
1639 {
1640 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1641 struct iwl_mcast_filter_cmd *cmd;
1642 struct netdev_hw_addr *addr;
1643 int addr_count;
1644 bool pass_all;
1645 int len;
1646
1647 addr_count = netdev_hw_addr_list_count(mc_list);
1648 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
1649 IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
1650 if (pass_all)
1651 addr_count = 0;
1652
1653 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
1654 cmd = kzalloc(len, GFP_ATOMIC);
1655 if (!cmd)
1656 return 0;
1657
1658 if (pass_all) {
1659 cmd->pass_all = 1;
1660 return (u64)(unsigned long)cmd;
1661 }
1662
1663 netdev_hw_addr_list_for_each(addr, mc_list) {
1664 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
1665 cmd->count, addr->addr);
1666 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
1667 addr->addr, ETH_ALEN);
1668 cmd->count++;
1669 }
1670
1671 return (u64)(unsigned long)cmd;
1672 }
1673
1674 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
1675 unsigned int changed_flags,
1676 unsigned int *total_flags,
1677 u64 multicast)
1678 {
1679 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1680 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
1681
1682 mutex_lock(&mvm->mutex);
1683
1684 /* replace previous configuration */
1685 kfree(mvm->mcast_filter_cmd);
1686 mvm->mcast_filter_cmd = cmd;
1687
1688 if (!cmd)
1689 goto out;
1690
1691 if (changed_flags & FIF_ALLMULTI)
1692 cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
1693
1694 if (cmd->pass_all)
1695 cmd->count = 0;
1696
1697 iwl_mvm_recalc_multicast(mvm);
1698 out:
1699 mutex_unlock(&mvm->mutex);
1700 *total_flags = 0;
1701 }
1702
1703 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
1704 struct ieee80211_vif *vif,
1705 unsigned int filter_flags,
1706 unsigned int changed_flags)
1707 {
1708 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
1709
1710 /* We support only filter for probe requests */
1711 if (!(changed_flags & FIF_PROBE_REQ))
1712 return;
1713
1714 /* Supported only for p2p client interfaces */
1715 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
1716 !vif->p2p)
1717 return;
1718
1719 mutex_lock(&mvm->mutex);
1720 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
1721 mutex_unlock(&mvm->mutex);
1722 }
1723
1724 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
1725 struct iwl_bcast_iter_data {
1726 struct iwl_mvm *mvm;
1727 struct iwl_bcast_filter_cmd *cmd;
1728 u8 current_filter;
1729 };
1730
1731 static void
1732 iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
1733 const struct iwl_fw_bcast_filter *in_filter,
1734 struct iwl_fw_bcast_filter *out_filter)
1735 {
1736 struct iwl_fw_bcast_filter_attr *attr;
1737 int i;
1738
1739 memcpy(out_filter, in_filter, sizeof(*out_filter));
1740
1741 for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
1742 attr = &out_filter->attrs[i];
1743
1744 if (!attr->mask)
1745 break;
1746
1747 switch (attr->reserved1) {
1748 case cpu_to_le16(BC_FILTER_MAGIC_IP):
1749 if (vif->bss_conf.arp_addr_cnt != 1) {
1750 attr->mask = 0;
1751 continue;
1752 }
1753
1754 attr->val = vif->bss_conf.arp_addr_list[0];
1755 break;
1756 case cpu_to_le16(BC_FILTER_MAGIC_MAC):
1757 attr->val = *(__be32 *)&vif->addr[2];
1758 break;
1759 default:
1760 break;
1761 }
1762 attr->reserved1 = 0;
1763 out_filter->num_attrs++;
1764 }
1765 }
1766
1767 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1768 struct ieee80211_vif *vif)
1769 {
1770 struct iwl_bcast_iter_data *data = _data;
1771 struct iwl_mvm *mvm = data->mvm;
1772 struct iwl_bcast_filter_cmd *cmd = data->cmd;
1773 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1774 struct iwl_fw_bcast_mac *bcast_mac;
1775 int i;
1776
1777 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
1778 return;
1779
1780 bcast_mac = &cmd->macs[mvmvif->id];
1781
1782 /*
1783 * enable filtering only for associated stations, but not for P2P
1784 * Clients
1785 */
1786 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1787 !vif->bss_conf.assoc)
1788 return;
1789
1790 bcast_mac->default_discard = 1;
1791
1792 /* copy all configured filters */
1793 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
1794 /*
1795 * Make sure we don't exceed our filter limit.
1796 * If there is still a valid filter to be configured,
1797 * be on the safe side and just allow bcast for this mac.
1798 */
1799 if (WARN_ON_ONCE(data->current_filter >=
1800 ARRAY_SIZE(cmd->filters))) {
1801 bcast_mac->default_discard = 0;
1802 bcast_mac->attached_filters = 0;
1803 break;
1804 }
1805
1806 iwl_mvm_set_bcast_filter(vif,
1807 &mvm->bcast_filters[i],
1808 &cmd->filters[data->current_filter]);
1809
1810 /* skip current filter if it contains no attributes */
1811 if (!cmd->filters[data->current_filter].num_attrs)
1812 continue;
1813
1814 /* attach the filter to current mac */
1815 bcast_mac->attached_filters |=
1816 cpu_to_le16(BIT(data->current_filter));
1817
1818 data->current_filter++;
1819 }
1820 }
1821
1822 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
1823 struct iwl_bcast_filter_cmd *cmd)
1824 {
1825 struct iwl_bcast_iter_data iter_data = {
1826 .mvm = mvm,
1827 .cmd = cmd,
1828 };
1829
1830 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
1831 return false;
1832
1833 memset(cmd, 0, sizeof(*cmd));
1834 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
1835 cmd->max_macs = ARRAY_SIZE(cmd->macs);
1836
1837 #ifdef CONFIG_IWLWIFI_DEBUGFS
1838 /* use debugfs filters/macs if override is configured */
1839 if (mvm->dbgfs_bcast_filtering.override) {
1840 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
1841 sizeof(cmd->filters));
1842 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
1843 sizeof(cmd->macs));
1844 return true;
1845 }
1846 #endif
1847
1848 /* if no filters are configured, do nothing */
1849 if (!mvm->bcast_filters)
1850 return false;
1851
1852 /* configure and attach these filters for each associated sta vif */
1853 ieee80211_iterate_active_interfaces(
1854 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1855 iwl_mvm_bcast_filter_iterator, &iter_data);
1856
1857 return true;
1858 }
1859
1860 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1861 {
1862 struct iwl_bcast_filter_cmd cmd;
1863
1864 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1865 return 0;
1866
1867 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1868 return 0;
1869
1870 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
1871 sizeof(cmd), &cmd);
1872 }
1873 #else
1874 static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
1875 {
1876 return 0;
1877 }
1878 #endif
1879
1880 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
1881 struct ieee80211_vif *vif)
1882 {
1883 struct iwl_mu_group_mgmt_cmd cmd = {};
1884
1885 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
1886 WLAN_MEMBERSHIP_LEN);
1887 memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
1888 WLAN_USER_POSITION_LEN);
1889
1890 return iwl_mvm_send_cmd_pdu(mvm,
1891 WIDE_ID(DATA_PATH_GROUP,
1892 UPDATE_MU_GROUPS_CMD),
1893 0, sizeof(cmd), &cmd);
1894 }
1895
1896 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
1897 struct ieee80211_vif *vif)
1898 {
1899 if (vif->mu_mimo_owner) {
1900 struct iwl_mu_group_mgmt_notif *notif = _data;
1901
1902 /*
1903 * MU-MIMO Group Id action frame is little endian. We treat
1904 * the data received from firmware as if it came from the
1905 * action frame, so no conversion is needed.
1906 */
1907 ieee80211_update_mu_groups(vif,
1908 (u8 *)&notif->membership_status,
1909 (u8 *)&notif->user_position);
1910 }
1911 }
1912
1913 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
1914 struct iwl_rx_cmd_buffer *rxb)
1915 {
1916 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1917 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
1918
1919 ieee80211_iterate_active_interfaces_atomic(
1920 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
1921 iwl_mvm_mu_mimo_iface_iterator, notif);
1922 }
1923
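/*
 * Helper for parsing the PPE Thresholds field: each PPET value is
 * IEEE80211_PPE_THRES_INFO_PPET_SIZE bits wide and the field is packed
 * LSB first, so a value may straddle a byte boundary.  For example
 * (assuming the 3-bit PPET size), ppe_pos_bit == 7 yields bit 7 of
 * ppe[0] as the low bit plus bits 0-1 of ppe[1] as the high bits,
 * which is what the residue_bits path below computes.
 */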
1924 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
1925 {
1926 u8 byte_num = ppe_pos_bit / 8;
1927 u8 bit_num = ppe_pos_bit % 8;
1928 u8 residue_bits;
1929 u8 res;
1930
1931 if (bit_num <= 5)
1932 return (ppe[byte_num] >> bit_num) &
1933 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
1934
1935 /*
1936 * If bit_num > 5, we have to combine bits with next byte.
1937 * Calculate how many bits we need to take from current byte (called
1938 * here "residue_bits"), and add them to bits from next byte.
1939 */
1940
1941 residue_bits = 8 - bit_num;
1942
1943 res = (ppe[byte_num + 1] &
1944 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
1945 residue_bits;
1946 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
1947
1948 return res;
1949 }
1950
1951 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
1952 struct ieee80211_vif *vif, u8 sta_id)
1953 {
1954 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1955 struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
1956 .sta_id = sta_id,
1957 .tid_limit = IWL_MAX_TID_COUNT,
1958 .bss_color = vif->bss_conf.bss_color,
1959 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
1960 .frame_time_rts_th =
1961 cpu_to_le16(vif->bss_conf.frame_time_rts_th),
1962 };
1963 struct ieee80211_sta *sta;
1964 u32 flags;
1965 int i;
1966
1967 rcu_read_lock();
1968
1969 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
1970 if (IS_ERR(sta)) {
1971 rcu_read_unlock();
1972 WARN(1, "Can't find STA to configure HE\n");
1973 return;
1974 }
1975
1976 if (!sta->he_cap.has_he) {
1977 rcu_read_unlock();
1978 return;
1979 }
1980
1981 flags = 0;
1982
1983 /* HTC flags */
1984 if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
1985 IEEE80211_HE_MAC_CAP0_HTC_HE)
1986 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
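/*
 * The HE "link adaptation" capability is a two-bit field that spans
 * mac_cap_info[1] and mac_cap_info[2].  Per the spec, a value of 2
 * advertises unsolicited feedback only and 3 advertises both, which
 * is the mapping the code below encodes into htc_flags.
 */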
1987 if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
1988 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
1989 (sta->he_cap.he_cap_elem.mac_cap_info[2] &
1990 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
1991 u8 link_adap =
1992 ((sta->he_cap.he_cap_elem.mac_cap_info[2] &
1993 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
1994 (sta->he_cap.he_cap_elem.mac_cap_info[1] &
1995 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
1996
1997 if (link_adap == 2)
1998 sta_ctxt_cmd.htc_flags |=
1999 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
2000 else if (link_adap == 3)
2001 sta_ctxt_cmd.htc_flags |=
2002 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
2003 }
2004 if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
2005 IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
2006 sta_ctxt_cmd.htc_flags |=
2007 cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
2008 if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
2009 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
2010 if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
2011 IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
2012 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
2013 if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
2014 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
2015
2016 /*
2017 * Initialize the PPE thresholds to "None" (7), as described in Table
2018 * 9-262ac of 802.11ax/D3.0.
2019 */
2020 memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
2021
2022 /* If PPE Thresholds exist, parse them into a FW-familiar format. */
2023 if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
2024 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
2025 u8 nss = (sta->he_cap.ppe_thres[0] &
2026 IEEE80211_PPE_THRES_NSS_MASK) + 1;
2027 u8 ru_index_bitmap =
2028 (sta->he_cap.ppe_thres[0] &
2029 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
2030 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
2031 u8 *ppe = &sta->he_cap.ppe_thres[0];
2032 u8 ppe_pos_bit = 7; /* Starting after PPE header */
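/*
 * The PPE Thresholds field begins with a 7-bit header (3 bits of NSS
 * plus a 4-bit RU index bitmask, both parsed above), which is why the
 * first PPET entry starts at bit offset 7.
 */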
2033
2034 /*
2035 * FW currently supports only nss == MAX_HE_SUPP_NSS
2036 *
2037 * If nss > MAX: we can ignore values we don't support
2038 * If nss < MAX: we can set zeros in other streams
2039 */
2040 if (nss > MAX_HE_SUPP_NSS) {
2041 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
2042 MAX_HE_SUPP_NSS);
2043 nss = MAX_HE_SUPP_NSS;
2044 }
2045
2046 for (i = 0; i < nss; i++) {
2047 u8 ru_index_tmp = ru_index_bitmap << 1;
2048 u8 bw;
2049
2050 for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
2051 ru_index_tmp >>= 1;
2052 if (!(ru_index_tmp & 1))
2053 continue;
2054
2055 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
2056 iwl_mvm_he_get_ppe_val(ppe,
2057 ppe_pos_bit);
2058 ppe_pos_bit +=
2059 IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2060 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
2061 iwl_mvm_he_get_ppe_val(ppe,
2062 ppe_pos_bit);
2063 ppe_pos_bit +=
2064 IEEE80211_PPE_THRES_INFO_PPET_SIZE;
2065 }
2066 }
2067
2068 flags |= STA_CTXT_HE_PACKET_EXT;
2069 }
2070 rcu_read_unlock();
2071
2072 /* Mark MU EDCA as enabled, unless none detected on some AC */
2073 flags |= STA_CTXT_HE_MU_EDCA_CW;
2074 for (i = 0; i < AC_NUM; i++) {
2075 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
2076 &mvmvif->queue_params[i].mu_edca_param_rec;
2077
2078 if (!mvmvif->queue_params[i].mu_edca) {
2079 flags &= ~STA_CTXT_HE_MU_EDCA_CW;
2080 break;
2081 }
2082
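/*
 * mu_edca->ecw_min_max packs the ECWmin exponent in its low nibble
 * and the ECWmax exponent in its high nibble (MU EDCA Parameter Set
 * record); split it into the firmware's cwmin/cwmax fields.
 */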
2083 sta_ctxt_cmd.trig_based_txf[i].cwmin =
2084 cpu_to_le16(mu_edca->ecw_min_max & 0xf);
2085 sta_ctxt_cmd.trig_based_txf[i].cwmax =
2086 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
2087 sta_ctxt_cmd.trig_based_txf[i].aifsn =
2088 cpu_to_le16(mu_edca->aifsn);
2089 sta_ctxt_cmd.trig_based_txf[i].mu_time =
2090 cpu_to_le16(mu_edca->mu_edca_timer);
2091 }
2092
2093 if (vif->bss_conf.multi_sta_back_32bit)
2094 flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
2095
2096 if (vif->bss_conf.ack_enabled)
2097 flags |= STA_CTXT_HE_ACK_ENABLED;
2098
2099 if (vif->bss_conf.uora_exists) {
2100 flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
2101
2102 sta_ctxt_cmd.rand_alloc_ecwmin =
2103 vif->bss_conf.uora_ocw_range & 0x7;
2104 sta_ctxt_cmd.rand_alloc_ecwmax =
2105 (vif->bss_conf.uora_ocw_range >> 3) & 0x7;
2106 }
2107
2108 /* TODO: support Multi BSSID IE */
2109
2110 sta_ctxt_cmd.flags = cpu_to_le32(flags);
2111
2112 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
2113 DATA_PATH_GROUP, 0),
2114 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
2115 IWL_ERR(mvm, "Failed to config FW to work HE!\n");
2116 }
2117
2118 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
2119 struct ieee80211_vif *vif,
2120 struct ieee80211_bss_conf *bss_conf,
2121 u32 changes)
2122 {
2123 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2124 int ret;
2125
2126 /*
2127 * Re-calculate the tsf id, as the master-slave relations depend on the
2128 * beacon interval, which was not known when the station interface was
2129 * added.
2130 */
2131 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
2132 if (vif->bss_conf.he_support &&
2133 !iwlwifi_mod_params.disable_11ax)
2134 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
2135
2136 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2137 }
2138
2139 /*
2140 * If we're not associated yet, take the (new) BSSID before associating
2141 * so the firmware knows. If we're already associated, then use the old
2142 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
2143 * branch for disassociation below.
2144 */
2145 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
2146 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2147
2148 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
2149 if (ret)
2150 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2151
2152 /* after sending it once, adopt mac80211 data */
2153 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
2154 mvmvif->associated = bss_conf->assoc;
2155
2156 if (changes & BSS_CHANGED_ASSOC) {
2157 if (bss_conf->assoc) {
2158 /* clear statistics to get clean beacon counter */
2159 iwl_mvm_request_statistics(mvm, true);
2160 memset(&mvmvif->beacon_stats, 0,
2161 sizeof(mvmvif->beacon_stats));
2162
2163 /* add quota for this interface */
2164 ret = iwl_mvm_update_quotas(mvm, true, NULL);
2165 if (ret) {
2166 IWL_ERR(mvm, "failed to update quotas\n");
2167 return;
2168 }
2169
2170 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2171 &mvm->status)) {
2172 /*
2173 * If we're restarting then the firmware will
2174 * obviously have lost synchronisation with
2175 * the AP. It will attempt to synchronise by
2176 * itself, but we can make it more reliable by
2177 * scheduling a session protection time event.
2178 *
2179 * The firmware needs to receive a beacon to
2180 * catch up with synchronisation; use 110% of
2181 * the beacon interval.
2182 *
2183 * Set a large maximum delay to allow for more
2184 * than a single interface.
2185 */
2186 u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
2187 iwl_mvm_protect_session(mvm, vif, dur, dur,
2188 5 * dur, false);
2189 }
2190
2191 iwl_mvm_sf_update(mvm, vif, false);
2192 iwl_mvm_power_vif_assoc(mvm, vif);
2193 if (vif->p2p) {
2194 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
2195 iwl_mvm_update_smps(mvm, vif,
2196 IWL_MVM_SMPS_REQ_PROT,
2197 IEEE80211_SMPS_DYNAMIC);
2198 }
2199 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
2200 /*
2201 * If update fails - SF might be running in associated
2202 * mode while disassociated - which is forbidden.
2203 */
2204 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
2205 "Failed to update SF upon disassociation\n");
2206
2207 /*
2208 * If we get an assert during the connection (after the
2209 * station has been added, but before the vif is set
2210 * to associated), mac80211 will re-add the station and
2211 * then configure the vif. Since the vif is not
2212 * associated, we would remove the station here and
2213 * this would fail the recovery.
2214 */
2215 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
2216 &mvm->status)) {
2217 /*
2218 * Remove AP station now that
2219 * the MAC is unassoc
2220 */
2221 ret = iwl_mvm_rm_sta_id(mvm, vif,
2222 mvmvif->ap_sta_id);
2223 if (ret)
2224 IWL_ERR(mvm,
2225 "failed to remove AP station\n");
2226
2227 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
2228 mvm->d0i3_ap_sta_id =
2229 IWL_MVM_INVALID_STA;
2230 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
2231 }
2232
2233 /* remove quota for this interface */
2234 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2235 if (ret)
2236 IWL_ERR(mvm, "failed to update quotas\n");
2237
2238 if (vif->p2p)
2239 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
2240
2241 /* this will take the cleared BSSID from bss_conf */
2242 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2243 if (ret)
2244 IWL_ERR(mvm,
2245 "failed to update MAC %pM (clear after unassoc)\n",
2246 vif->addr);
2247 }
2248
2249 /*
2250 * The firmware tracks the MU-MIMO group on its own.
2251 * However, on HW restart we should restore this data.
2252 */
2253 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
2254 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
2255 ret = iwl_mvm_update_mu_groups(mvm, vif);
2256 if (ret)
2257 IWL_ERR(mvm,
2258 "failed to update VHT MU_MIMO groups\n");
2259 }
2260
2261 iwl_mvm_recalc_multicast(mvm);
2262 iwl_mvm_configure_bcast_filter(mvm);
2263
2264 /* reset rssi values */
2265 mvmvif->bf_data.ave_beacon_signal = 0;
2266
2267 iwl_mvm_bt_coex_vif_change(mvm);
2268 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
2269 IEEE80211_SMPS_AUTOMATIC);
2270 if (fw_has_capa(&mvm->fw->ucode_capa,
2271 IWL_UCODE_TLV_CAPA_UMAC_SCAN))
2272 iwl_mvm_config_scan(mvm);
2273 }
2274
2275 if (changes & BSS_CHANGED_BEACON_INFO) {
2276 /*
2277 * We received a beacon from the associated AP so
2278 * remove the session protection.
2279 */
2280 iwl_mvm_stop_session_protection(mvm, vif);
2281
2282 iwl_mvm_sf_update(mvm, vif, false);
2283 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2284 }
2285
2286 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
2287 /*
2288 * Send power command on every beacon change,
2289 * because we may not have enabled beacon abort yet.
2290 */
2291 BSS_CHANGED_BEACON_INFO)) {
2292 ret = iwl_mvm_power_update_mac(mvm);
2293 if (ret)
2294 IWL_ERR(mvm, "failed to update power mode\n");
2295 }
2296
2297 if (changes & BSS_CHANGED_TXPOWER) {
2298 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2299 bss_conf->txpower);
2300 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2301 }
2302
2303 if (changes & BSS_CHANGED_CQM) {
2304 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
2305 /* reset cqm events tracking */
2306 mvmvif->bf_data.last_cqm_event = 0;
2307 if (mvmvif->bf_data.bf_enabled) {
2308 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
2309 if (ret)
2310 IWL_ERR(mvm,
2311 "failed to update CQM thresholds\n");
2312 }
2313 }
2314
2315 if (changes & BSS_CHANGED_ARP_FILTER) {
2316 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
2317 iwl_mvm_configure_bcast_filter(mvm);
2318 }
2319 }
2320
2321 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2322 struct ieee80211_vif *vif)
2323 {
2324 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2325 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2326 int ret;
2327
2328 /*
2329 * iwl_mvm_mac_ctxt_add() might read directly from the device
2330 * (the system time), so make sure it is available.
2331 */
2332 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
2333 if (ret)
2334 return ret;
2335
2336 mutex_lock(&mvm->mutex);
2337
2338 /* Send the beacon template */
2339 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
2340 if (ret)
2341 goto out_unlock;
2342
2343 /*
2344 * Re-calculate the tsf id, as the master-slave relations depend on the
2345 * beacon interval, which was not known when the AP interface was added.
2346 */
2347 if (vif->type == NL80211_IFTYPE_AP)
2348 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
2349
2350 mvmvif->ap_assoc_sta_count = 0;
2351
2352 /* Add the mac context */
2353 ret = iwl_mvm_mac_ctxt_add(mvm, vif);
2354 if (ret)
2355 goto out_unlock;
2356
2357 /* Perform the binding */
2358 ret = iwl_mvm_binding_add_vif(mvm, vif);
2359 if (ret)
2360 goto out_remove;
2361
2362 /*
2363 * This is not very nice, but the simplest:
2364 * For older FWs adding the mcast sta before the bcast station may
2365 * cause assert 0x2b00.
2366 * This is fixed in later FW so make the order of addition depend on
2367 * the TLV.
2368 */
2369 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2370 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2371 if (ret)
2372 goto out_unbind;
2373 /*
2374 * Send the bcast station. At this stage the TBTT and DTIM time
2375 * events are added and applied to the scheduler
2376 */
2377 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2378 if (ret) {
2379 iwl_mvm_rm_mcast_sta(mvm, vif);
2380 goto out_unbind;
2381 }
2382 } else {
2383 /*
2384 * Send the bcast station. At this stage the TBTT and DTIM time
2385 * events are added and applied to the scheduler
2386 */
2387 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2388 if (ret)
2389 goto out_unbind;
2390 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2391 if (ret) {
2392 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2393 goto out_unbind;
2394 }
2395 }
2396
2397 /* must be set before quota calculations */
2398 mvmvif->ap_ibss_active = true;
2399
2400 /* power update needs to be done before quotas */
2401 iwl_mvm_power_update_mac(mvm);
2402
2403 ret = iwl_mvm_update_quotas(mvm, false, NULL);
2404 if (ret)
2405 goto out_quota_failed;
2406
2407 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2408 if (vif->p2p && mvm->p2p_device_vif)
2409 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2410
2411 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
2412
2413 iwl_mvm_bt_coex_vif_change(mvm);
2414
2415 /* we don't support TDLS during DCM */
2416 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2417 iwl_mvm_teardown_tdls_peers(mvm);
2418
2419 goto out_unlock;
2420
2421 out_quota_failed:
2422 iwl_mvm_power_update_mac(mvm);
2423 mvmvif->ap_ibss_active = false;
2424 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2425 iwl_mvm_rm_mcast_sta(mvm, vif);
2426 out_unbind:
2427 iwl_mvm_binding_remove_vif(mvm, vif);
2428 out_remove:
2429 iwl_mvm_mac_ctxt_remove(mvm, vif);
2430 out_unlock:
2431 mutex_unlock(&mvm->mutex);
2432 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
2433 return ret;
2434 }
2435
2436 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2437 struct ieee80211_vif *vif)
2438 {
2439 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2440 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2441
2442 iwl_mvm_prepare_mac_removal(mvm, vif);
2443
2444 mutex_lock(&mvm->mutex);
2445
2446 /* Handle AP stop while in CSA */
2447 if (rcu_access_pointer(mvm->csa_vif) == vif) {
2448 iwl_mvm_remove_time_event(mvm, mvmvif,
2449 &mvmvif->time_event_data);
2450 RCU_INIT_POINTER(mvm->csa_vif, NULL);
2451 mvmvif->csa_countdown = false;
2452 }
2453
2454 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
2455 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
2456 mvm->csa_tx_block_bcn_timeout = 0;
2457 }
2458
2459 mvmvif->ap_ibss_active = false;
2460 mvm->ap_last_beacon_gp2 = 0;
2461
2462 iwl_mvm_bt_coex_vif_change(mvm);
2463
2464 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
2465
2466 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
2467 if (vif->p2p && mvm->p2p_device_vif)
2468 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
2469
2470 iwl_mvm_update_quotas(mvm, false, NULL);
2471
2472 /*
2473 * This is not very nice, but the simplest:
2474 * For older FWs removing the mcast sta before the bcast station may
2475 * cause assert 0x2b00.
2476 * This is fixed in later FW (which will stop beaconing when removing
2477 * bcast station).
2478 * So make the order of removal depend on the TLV
2479 */
2480 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
2481 iwl_mvm_rm_mcast_sta(mvm, vif);
2482 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2483 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
2484 iwl_mvm_rm_mcast_sta(mvm, vif);
2485 iwl_mvm_binding_remove_vif(mvm, vif);
2486
2487 iwl_mvm_power_update_mac(mvm);
2488
2489 iwl_mvm_mac_ctxt_remove(mvm, vif);
2490
2491 mutex_unlock(&mvm->mutex);
2492 }
2493
2494 static void
2495 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
2496 struct ieee80211_vif *vif,
2497 struct ieee80211_bss_conf *bss_conf,
2498 u32 changes)
2499 {
2500 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2501
2502 /* Changes will be applied when the AP/IBSS is started */
2503 if (!mvmvif->ap_ibss_active)
2504 return;
2505
2506 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
2507 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
2508 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
2509 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
2510
2511 /* Need to send a new beacon template to the FW */
2512 if (changes & BSS_CHANGED_BEACON &&
2513 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
2514 IWL_WARN(mvm, "Failed updating beacon data\n");
2515
2516 if (changes & BSS_CHANGED_TXPOWER) {
2517 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
2518 bss_conf->txpower);
2519 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
2520 }
2521 }
2522
2523 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
2524 struct ieee80211_vif *vif,
2525 struct ieee80211_bss_conf *bss_conf,
2526 u32 changes)
2527 {
2528 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2529
2530 /*
2531 * iwl_mvm_bss_info_changed_station() might call
2532 * iwl_mvm_protect_session(), which reads directly from
2533 * the device (the system time), so make sure it is available.
2534 */
2535 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
2536 return;
2537
2538 mutex_lock(&mvm->mutex);
2539
2540 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
2541 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2542
2543 switch (vif->type) {
2544 case NL80211_IFTYPE_STATION:
2545 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
2546 break;
2547 case NL80211_IFTYPE_AP:
2548 case NL80211_IFTYPE_ADHOC:
2549 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
2550 break;
2551 case NL80211_IFTYPE_MONITOR:
2552 if (changes & BSS_CHANGED_MU_GROUPS)
2553 iwl_mvm_update_mu_groups(mvm, vif);
2554 break;
2555 default:
2556 /* shouldn't happen */
2557 WARN_ON_ONCE(1);
2558 }
2559
2560 mutex_unlock(&mvm->mutex);
2561 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
2562 }
2563
2564 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
2565 struct ieee80211_vif *vif,
2566 struct ieee80211_scan_request *hw_req)
2567 {
2568 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2569 int ret;
2570
2571 if (hw_req->req.n_channels == 0 ||
2572 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
2573 return -EINVAL;
2574
2575 mutex_lock(&mvm->mutex);
2576 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
2577 mutex_unlock(&mvm->mutex);
2578
2579 return ret;
2580 }
2581
2582 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
2583 struct ieee80211_vif *vif)
2584 {
2585 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2586
2587 mutex_lock(&mvm->mutex);
2588
2589 /* Due to a race condition, it's possible that mac80211 asks
2590 * us to stop a hw_scan when it's already stopped. This can
2591 * happen, for instance, if we stopped the scan ourselves,
2592 * called ieee80211_scan_completed() and the userspace called
2593 * cancel scan before ieee80211_scan_work() could run.
2594 * To handle that, simply return if the scan is not running.
2595 */
2596 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
2597 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2598
2599 mutex_unlock(&mvm->mutex);
2600 }
2601
2602 static void
2603 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
2604 struct ieee80211_sta *sta, u16 tids,
2605 int num_frames,
2606 enum ieee80211_frame_release_type reason,
2607 bool more_data)
2608 {
2609 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2610
2611 /* Called when we need to transmit (a) frame(s) from mac80211 */
2612
2613 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2614 tids, more_data, false);
2615 }
2616
2617 static void
2618 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
2619 struct ieee80211_sta *sta, u16 tids,
2620 int num_frames,
2621 enum ieee80211_frame_release_type reason,
2622 bool more_data)
2623 {
2624 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2625
2626 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */
2627
2628 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
2629 tids, more_data, true);
2630 }
2631
2632 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2633 enum sta_notify_cmd cmd,
2634 struct ieee80211_sta *sta)
2635 {
2636 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2637 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2638 unsigned long txqs = 0, tids = 0;
2639 int tid;
2640
2641 /*
2642 * If we have TVQM then queue numbers can be too large for the txqs
2643 * bitmap below - luckily we really shouldn't get here with that
2644 * because such hardware should have firmware supporting buffer
2645 * station offload.
2645 */
2646 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
2647 return;
2648
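/*
 * Walk the TIDs under the station lock and build two bitmaps: txqs
 * collects the hardware queues backing this station (used below to
 * freeze/unfreeze their stuck-queue timers) and tids collects the
 * TIDs that still have frames queued (reported to mac80211 as
 * buffered while the station sleeps).
 */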
2649 spin_lock_bh(&mvmsta->lock);
2650 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
2651 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
2652
2653 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
2654 continue;
2655
2656 __set_bit(tid_data->txq_id, &txqs);
2657
2658 if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
2659 continue;
2660
2661 __set_bit(tid, &tids);
2662 }
2663
2664 switch (cmd) {
2665 case STA_NOTIFY_SLEEP:
2666 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
2667 ieee80211_sta_set_buffered(sta, tid, true);
2668
2669 if (txqs)
2670 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
2671 /*
2672 * The fw updates the STA to be asleep. Tx packets on the Tx
2673 * queues to this station will not be transmitted. The fw will
2674 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
2675 */
2676 break;
2677 case STA_NOTIFY_AWAKE:
2678 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
2679 break;
2680
2681 if (txqs)
2682 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
2683 iwl_mvm_sta_modify_ps_wake(mvm, sta);
2684 break;
2685 default:
2686 break;
2687 }
2688 spin_unlock_bh(&mvmsta->lock);
2689 }
2690
2691 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
2692 struct ieee80211_vif *vif,
2693 enum sta_notify_cmd cmd,
2694 struct ieee80211_sta *sta)
2695 {
2696 __iwl_mvm_mac_sta_notify(hw, cmd, sta);
2697 }
2698
2699 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2700 {
2701 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2702 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
2703 struct ieee80211_sta *sta;
2704 struct iwl_mvm_sta *mvmsta;
2705 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
2706
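/*
 * The firmware reports per-station power-save transitions through
 * this notification; look up the station by its firmware index and
 * mirror the change into mac80211 via __iwl_mvm_mac_sta_notify() and
 * ieee80211_sta_ps_transition(), plus the matching uAPSD/PS-poll
 * trigger handling further down.
 */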
2707 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
2708 return;
2709
2710 rcu_read_lock();
2711 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
2712 if (WARN_ON(IS_ERR_OR_NULL(sta))) {
2713 rcu_read_unlock();
2714 return;
2715 }
2716
2717 mvmsta = iwl_mvm_sta_from_mac80211(sta);
2718
2719 if (!mvmsta->vif ||
2720 mvmsta->vif->type != NL80211_IFTYPE_AP) {
2721 rcu_read_unlock();
2722 return;
2723 }
2724
2725 if (mvmsta->sleeping != sleeping) {
2726 mvmsta->sleeping = sleeping;
2727 __iwl_mvm_mac_sta_notify(mvm->hw,
2728 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
2729 sta);
2730 ieee80211_sta_ps_transition(sta, sleeping);
2731 }
2732
2733 if (sleeping) {
2734 switch (notif->type) {
2735 case IWL_MVM_PM_EVENT_AWAKE:
2736 case IWL_MVM_PM_EVENT_ASLEEP:
2737 break;
2738 case IWL_MVM_PM_EVENT_UAPSD:
2739 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
2740 break;
2741 case IWL_MVM_PM_EVENT_PS_POLL:
2742 ieee80211_sta_pspoll(sta);
2743 break;
2744 default:
2745 break;
2746 }
2747 }
2748
2749 rcu_read_unlock();
2750 }
2751
2752 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
2753 struct ieee80211_vif *vif,
2754 struct ieee80211_sta *sta)
2755 {
2756 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2757 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2758
2759 /*
2760 * This is called before mac80211 does RCU synchronisation,
2761 * so here we already invalidate our internal RCU-protected
2762 * station pointer. The rest of the code will thus no longer
2763 * be able to find the station this way, and we don't rely
2764 * on further RCU synchronisation after the sta_state()
2765 * callback deleted the station.
2766 */
2767 mutex_lock(&mvm->mutex);
2768 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
2769 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
2770 ERR_PTR(-ENOENT));
2771
2772 mutex_unlock(&mvm->mutex);
2773 }
2774
2775 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2776 const u8 *bssid)
2777 {
2778 int i;
2779
2780 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
2781 struct iwl_mvm_tcm_mac *mdata;
2782
2783 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
2784 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
2785 mdata->opened_rx_ba_sessions = false;
2786 }
2787
2788 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
2789 return;
2790
2791 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
2792 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2793 return;
2794 }
2795
2796 if (!vif->p2p &&
2797 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
2798 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2799 return;
2800 }
2801
2802 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
2803 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
2804 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
2805 return;
2806 }
2807 }
2808
2809 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
2810 }
2811
2812 static void
2813 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
2814 struct ieee80211_vif *vif, u8 *peer_addr,
2815 enum nl80211_tdls_operation action)
2816 {
2817 struct iwl_fw_dbg_trigger_tlv *trig;
2818 struct iwl_fw_dbg_trigger_tdls *tdls_trig;
2819
2820 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
2821 return;
2822
2823 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
2824 tdls_trig = (void *)trig->data;
2825 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
2826 ieee80211_vif_to_wdev(vif), trig))
2827 return;
2828
2829 if (!(tdls_trig->action_bitmap & BIT(action)))
2830 return;
2831
2832 if (tdls_trig->peer_mode &&
2833 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
2834 return;
2835
2836 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
2837 "TDLS event occurred, peer %pM, action %d",
2838 peer_addr, action);
2839 }
2840
2841 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
2842 struct iwl_mvm_sta *mvm_sta)
2843 {
2844 struct iwl_mvm_tid_data *tid_data;
2845 struct sk_buff *skb;
2846 int i;
2847
2848 spin_lock_bh(&mvm_sta->lock);
2849 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
2850 tid_data = &mvm_sta->tid_data[i];
2851
2852 while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
2853 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2854
2855 /*
2856 * The first deferred frame should've stopped the MAC
2857 * queues, so we should never get a second deferred
2858 * frame for the RA/TID.
2859 */
2860 iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
2861 ieee80211_free_txskb(mvm->hw, skb);
2862 }
2863 }
2864 spin_unlock_bh(&mvm_sta->lock);
2865 }
2866
2867 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2868 struct ieee80211_vif *vif,
2869 struct ieee80211_sta *sta,
2870 enum ieee80211_sta_state old_state,
2871 enum ieee80211_sta_state new_state)
2872 {
2873 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
2874 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2875 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2876 int ret;
2877
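/*
 * mac80211 moves a station through NOTEXIST <-> NONE <-> AUTH <->
 * ASSOC <-> AUTHORIZED one step at a time, so each branch below
 * handles exactly one transition (in either direction).
 */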
2878 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
2879 sta->addr, old_state, new_state);
2880
2881 /* this would be a mac80211 bug ... but don't crash */
2882 if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
2883 return -EINVAL;
2884
2885 /*
2886 * If we are in a STA removal flow and in DQA mode:
2887 *
2888 * This is after the sync_rcu part, so the queues have already been
2889 * flushed. No more TXs on their way in mac80211's path, and no more in
2890 * the queues.
2891 * Also, we won't be getting any new TX frames for this station.
2892 * What we might have are deferred TX frames that need to be taken care
2893 * of.
2894 *
2895 * Drop any still-queued deferred-frame before removing the STA, and
2896 * make sure the worker is no longer handling frames for this STA.
2897 */
2898 if (old_state == IEEE80211_STA_NONE &&
2899 new_state == IEEE80211_STA_NOTEXIST) {
2900 iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
2901 flush_work(&mvm->add_stream_wk);
2902
2903 /*
2904 * No need to make sure deferred TX indication is off since the
2905 * worker will already remove it if it was on
2906 */
2907 }
2908
2909 mutex_lock(&mvm->mutex);
2910 /* track whether or not the station is associated */
2911 mvm_sta->sta_state = new_state;
2912
2913 if (old_state == IEEE80211_STA_NOTEXIST &&
2914 new_state == IEEE80211_STA_NONE) {
2915 /*
2916 * Firmware bug - it'll crash if the beacon interval is less
2917 * than 16. We can't avoid connecting at all, so refuse the
2918 * station state change, this will cause mac80211 to abandon
2919 * attempts to connect to this AP, and eventually wpa_s will
2920 * blacklist the AP...
2921 */
2922 if (vif->type == NL80211_IFTYPE_STATION &&
2923 vif->bss_conf.beacon_int < 16) {
2924 IWL_ERR(mvm,
2925 "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
2926 sta->addr, vif->bss_conf.beacon_int);
2927 ret = -EINVAL;
2928 goto out_unlock;
2929 }
2930
2931 if (sta->tdls &&
2932 (vif->p2p ||
2933 iwl_mvm_tdls_sta_count(mvm, NULL) ==
2934 IWL_MVM_TDLS_STA_COUNT ||
2935 iwl_mvm_phy_ctx_count(mvm) > 1)) {
2936 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
2937 ret = -EBUSY;
2938 goto out_unlock;
2939 }
2940
2941 ret = iwl_mvm_add_sta(mvm, vif, sta);
2942 if (sta->tdls && ret == 0) {
2943 iwl_mvm_recalc_tdls_state(mvm, vif, true);
2944 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2945 NL80211_TDLS_SETUP);
2946 }
2947 } else if (old_state == IEEE80211_STA_NONE &&
2948 new_state == IEEE80211_STA_AUTH) {
2949 /*
2950 * EBS may be disabled due to previous failures reported by FW.
2951 * Reset EBS status here assuming environment has been changed.
2952 */
2953 mvm->last_ebs_successful = true;
2954 iwl_mvm_check_uapsd(mvm, vif, sta->addr);
2955 ret = 0;
2956 } else if (old_state == IEEE80211_STA_AUTH &&
2957 new_state == IEEE80211_STA_ASSOC) {
2958 if (vif->type == NL80211_IFTYPE_AP) {
2959 mvmvif->ap_assoc_sta_count++;
2960 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2961 }
2962
2963 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2964 false);
2965 ret = iwl_mvm_update_sta(mvm, vif, sta);
2966 } else if (old_state == IEEE80211_STA_ASSOC &&
2967 new_state == IEEE80211_STA_AUTHORIZED) {
2968
2969 /* we don't support TDLS during DCM */
2970 if (iwl_mvm_phy_ctx_count(mvm) > 1)
2971 iwl_mvm_teardown_tdls_peers(mvm);
2972
2973 if (sta->tdls)
2974 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
2975 NL80211_TDLS_ENABLE_LINK);
2976
2977 /* enable beacon filtering */
2978 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2979
2980 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2981 true);
2982
2983 ret = 0;
2984 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2985 new_state == IEEE80211_STA_ASSOC) {
2986 /* disable beacon filtering */
2987 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
2988 ret = 0;
2989 } else if (old_state == IEEE80211_STA_ASSOC &&
2990 new_state == IEEE80211_STA_AUTH) {
2991 if (vif->type == NL80211_IFTYPE_AP) {
2992 mvmvif->ap_assoc_sta_count--;
2993 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
2994 }
2995 ret = 0;
2996 } else if (old_state == IEEE80211_STA_AUTH &&
2997 new_state == IEEE80211_STA_NONE) {
2998 ret = 0;
2999 } else if (old_state == IEEE80211_STA_NONE &&
3000 new_state == IEEE80211_STA_NOTEXIST) {
3001 ret = iwl_mvm_rm_sta(mvm, vif, sta);
3002 if (sta->tdls) {
3003 iwl_mvm_recalc_tdls_state(mvm, vif, false);
3004 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
3005 NL80211_TDLS_DISABLE_LINK);
3006 }
3007 } else {
3008 ret = -EIO;
3009 }
3010 out_unlock:
3011 mutex_unlock(&mvm->mutex);
3012
3013 if (sta->tdls && ret == 0) {
3014 if (old_state == IEEE80211_STA_NOTEXIST &&
3015 new_state == IEEE80211_STA_NONE)
3016 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
3017 else if (old_state == IEEE80211_STA_NONE &&
3018 new_state == IEEE80211_STA_NOTEXIST)
3019 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
3020 }
3021
3022 return ret;
3023 }
3024
3025 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3026 {
3027 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3028
3029 mvm->rts_threshold = value;
3030
3031 return 0;
3032 }
3033
3034 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
3035 struct ieee80211_vif *vif,
3036 struct ieee80211_sta *sta, u32 changed)
3037 {
3038 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3039
3040 if (vif->type == NL80211_IFTYPE_STATION &&
3041 changed & IEEE80211_RC_NSS_CHANGED)
3042 iwl_mvm_sf_update(mvm, vif, false);
3043 }
3044
3045 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
3046 struct ieee80211_vif *vif, u16 ac,
3047 const struct ieee80211_tx_queue_params *params)
3048 {
3049 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3050 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3051
3052 mvmvif->queue_params[ac] = *params;
3053
3054 /*
3055 * No need to update right away, we'll get BSS_CHANGED_QOS.
3056 * The exception is the P2P_DEVICE interface, which needs an immediate update.
3057 */
3058 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
3059 int ret;
3060
3061 mutex_lock(&mvm->mutex);
3062 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3063 mutex_unlock(&mvm->mutex);
3064 return ret;
3065 }
3066 return 0;
3067 }
3068
3069 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
3070 struct ieee80211_vif *vif,
3071 u16 req_duration)
3072 {
3073 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3074 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
3075 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
3076
3077 /*
3078 * iwl_mvm_protect_session() reads directly from the device
3079 * (the system time), so make sure it is available.
3080 */
3081 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
3082 return;
3083
3084 if (req_duration > duration)
3085 duration = req_duration;
3086
3087 mutex_lock(&mvm->mutex);
3088 /* Try really hard to protect the session and hear a beacon */
3089 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
3090 mutex_unlock(&mvm->mutex);
3091
3092 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
3093 }
3094
3095 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
3096 struct ieee80211_vif *vif,
3097 struct cfg80211_sched_scan_request *req,
3098 struct ieee80211_scan_ies *ies)
3099 {
3100 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3101
3102 int ret;
3103
3104 mutex_lock(&mvm->mutex);
3105
3106 if (!vif->bss_conf.idle) {
3107 ret = -EBUSY;
3108 goto out;
3109 }
3110
3111 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
3112
3113 out:
3114 mutex_unlock(&mvm->mutex);
3115 return ret;
3116 }
3117
3118 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
3119 struct ieee80211_vif *vif)
3120 {
3121 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3122 int ret;
3123
3124 mutex_lock(&mvm->mutex);
3125
3126 /* Due to a race condition, it's possible that mac80211 asks
3127 * us to stop a sched_scan when it's already stopped. This
3128 * can happen, for instance, if we stopped the scan ourselves,
3129 * called ieee80211_sched_scan_stopped() and the userspace called
3130 * stop sched scan before ieee80211_sched_scan_stopped_work()
3131 * could run. To handle this, simply return if the scan is
3132 * not running.
3133 */
3134 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
3135 mutex_unlock(&mvm->mutex);
3136 return 0;
3137 }
3138
3139 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
3140 mutex_unlock(&mvm->mutex);
3141 iwl_mvm_wait_for_async_handlers(mvm);
3142
3143 return ret;
3144 }
3145
3146 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3147 enum set_key_cmd cmd,
3148 struct ieee80211_vif *vif,
3149 struct ieee80211_sta *sta,
3150 struct ieee80211_key_conf *key)
3151 {
3152 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3153 struct iwl_mvm_sta *mvmsta;
3154 struct iwl_mvm_key_pn *ptk_pn;
3155 int keyidx = key->keyidx;
3156 int ret;
3157 u8 key_offset;
3158
3159 if (iwlwifi_mod_params.swcrypto) {
3160 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
3161 return -EOPNOTSUPP;
3162 }
3163
3164 switch (key->cipher) {
3165 case WLAN_CIPHER_SUITE_TKIP:
3166 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3167 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3168 break;
3169 case WLAN_CIPHER_SUITE_CCMP:
3170 case WLAN_CIPHER_SUITE_GCMP:
3171 case WLAN_CIPHER_SUITE_GCMP_256:
3172 if (!iwl_mvm_has_new_tx_api(mvm))
3173 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3174 break;
3175 case WLAN_CIPHER_SUITE_AES_CMAC:
3176 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3177 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3178 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
3179 break;
3180 case WLAN_CIPHER_SUITE_WEP40:
3181 case WLAN_CIPHER_SUITE_WEP104:
3182 /* For non-client mode, only use WEP keys for TX as we probably
3183 * don't have a station yet anyway and would then have to keep
3184 * track of the keys, linking them to each of the clients/peers
3185 * as they appear. For now, don't do that, for performance WEP
3186 * offload doesn't really matter much, but we need it for some
3187 * other offload features in client mode.
3188 */
3189 if (vif->type != NL80211_IFTYPE_STATION)
3190 return 0;
3191 break;
3192 default:
3193 /* currently FW supports only one optional cipher scheme */
3194 if (hw->n_cipher_schemes &&
3195 hw->cipher_schemes->cipher == key->cipher)
3196 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3197 else
3198 return -EOPNOTSUPP;
3199 }
3200
3201 mutex_lock(&mvm->mutex);
3202
3203 switch (cmd) {
3204 case SET_KEY:
3205 if ((vif->type == NL80211_IFTYPE_ADHOC ||
3206 vif->type == NL80211_IFTYPE_AP) && !sta) {
3207 /*
3208 * GTK on AP interface is a TX-only key, return 0;
3209 * on IBSS they're per-station and because we're lazy
3210 * we don't support them for RX, so do the same.
3211 * CMAC/GMAC in AP/IBSS modes must be done in software.
3212 */
3213 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3214 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3215 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3216 ret = -EOPNOTSUPP;
3217 else
3218 ret = 0;
3219
3220 if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
3221 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
3222 !iwl_mvm_has_new_tx_api(mvm)) {
3223 key->hw_key_idx = STA_KEY_IDX_INVALID;
3224 break;
3225 }
3226 }
3227
3228 /* During FW restart, in order to restore the state as it was,
3229 * don't try to reprogram keys we previously failed for.
3230 */
3231 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3232 key->hw_key_idx == STA_KEY_IDX_INVALID) {
3233 IWL_DEBUG_MAC80211(mvm,
3234 "skip invalid idx key programming during restart\n");
3235 ret = 0;
3236 break;
3237 }
3238
3239 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
3240 sta && iwl_mvm_has_new_rx_api(mvm) &&
3241 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
3242 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
3243 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
3244 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
3245 struct ieee80211_key_seq seq;
3246 int tid, q;
3247
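/*
 * With multi-queue RX the PN (replay) check for these ciphers is
 * done in the driver per RX queue, so allocate a per-queue, per-TID
 * PN table and seed it from the current key RX sequence; each RX
 * queue then advances its own copy independently.
 */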
3248 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3249 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
3250 ptk_pn = kzalloc(struct_size(ptk_pn, q,
3251 mvm->trans->num_rx_queues),
3252 GFP_KERNEL);
3253 if (!ptk_pn) {
3254 ret = -ENOMEM;
3255 break;
3256 }
3257
3258 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
3259 ieee80211_get_key_rx_seq(key, tid, &seq);
3260 for (q = 0; q < mvm->trans->num_rx_queues; q++)
3261 memcpy(ptk_pn->q[q].pn[tid],
3262 seq.ccmp.pn,
3263 IEEE80211_CCMP_PN_LEN);
3264 }
3265
3266 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
3267 }
3268
3269 /* in HW restart reuse the index, otherwise request a new one */
3270 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
3271 key_offset = key->hw_key_idx;
3272 else
3273 key_offset = STA_KEY_IDX_INVALID;
3274
3275 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
3276 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3277 if (ret) {
3278 IWL_WARN(mvm, "set key failed\n");
3279 /*
3280 * can't add key for RX, but we don't need it
3281 * in the device for TX so still return 0
3282 */
3283 key->hw_key_idx = STA_KEY_IDX_INVALID;
3284 ret = 0;
3285 }
3286
3287 break;
3288 case DISABLE_KEY:
3289 if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
3290 ret = 0;
3291 break;
3292 }
3293
3294 if (sta && iwl_mvm_has_new_rx_api(mvm) &&
3295 key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
3296 (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
3297 key->cipher == WLAN_CIPHER_SUITE_GCMP ||
3298 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
3299 mvmsta = iwl_mvm_sta_from_mac80211(sta);
3300 ptk_pn = rcu_dereference_protected(
3301 mvmsta->ptk_pn[keyidx],
3302 lockdep_is_held(&mvm->mutex));
3303 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
3304 if (ptk_pn)
3305 kfree_rcu(ptk_pn, rcu_head);
3306 }
3307
3308 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
3309 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
3310 break;
3311 default:
3312 ret = -EINVAL;
3313 }
3314
3315 mutex_unlock(&mvm->mutex);
3316 return ret;
3317 }
3318
3319 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
3320 struct ieee80211_vif *vif,
3321 struct ieee80211_key_conf *keyconf,
3322 struct ieee80211_sta *sta,
3323 u32 iv32, u16 *phase1key)
3324 {
3325 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3326
3327 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
3328 return;
3329
3330 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
3331 }
3332
3333
3334 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
3335 struct iwl_rx_packet *pkt, void *data)
3336 {
3337 struct iwl_mvm *mvm =
3338 container_of(notif_wait, struct iwl_mvm, notif_wait);
3339 struct iwl_hs20_roc_res *resp;
3340 int resp_len = iwl_rx_packet_payload_len(pkt);
3341 struct iwl_mvm_time_event_data *te_data = data;
3342
3343 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
3344 return true;
3345
3346 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
3347 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
3348 return true;
3349 }
3350
3351 resp = (void *)pkt->data;
3352
3353 IWL_DEBUG_TE(mvm,
3354 "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
3355 resp->status, resp->event_unique_id);
3356
3357 te_data->uid = le32_to_cpu(resp->event_unique_id);
3358 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
3359 te_data->uid);
3360
3361 spin_lock_bh(&mvm->time_event_lock);
3362 list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
3363 spin_unlock_bh(&mvm->time_event_lock);
3364
3365 return true;
3366 }
3367
3368 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
3369 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
3370 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
3371 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
3372 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
3373 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
3374 struct ieee80211_channel *channel,
3375 struct ieee80211_vif *vif,
3376 int duration)
3377 {
3378 int res, time_reg = DEVICE_SYSTEM_TIME_REG;
3379 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3380 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
3381 static const u16 time_event_response[] = { HOT_SPOT_CMD };
3382 struct iwl_notification_wait wait_time_event;
3383 u32 dtim_interval = vif->bss_conf.dtim_period *
3384 vif->bss_conf.beacon_int;
3385 u32 req_dur, delay;
3386 struct iwl_hs20_roc_req aux_roc_req = {
3387 .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
3388 .id_and_color =
3389 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
3390 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
3391 /* Set the channel info data */
3392 .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
3393 PHY_BAND_24 : PHY_BAND_5,
3394 .channel_info.channel = channel->hw_value,
3395 .channel_info.width = PHY_VHT_CHANNEL_MODE20,
3396 /* Set the time and duration */
3397 .apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
3398 };
3399
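/*
 * The time event durations and delays below are in TUs (1 TU = 1024
 * usec); MSEC_TO_TU converts the millisecond values used by mac80211.
 */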
3400 delay = AUX_ROC_MIN_DELAY;
3401 req_dur = MSEC_TO_TU(duration);
3402
3403 /*
3404 * If we are associated we want the delay time to be at least one
3405 * dtim interval so that the FW can wait until after the DTIM and
3406 * then start the time event, this will potentially allow us to
3407 * remain off-channel for the max duration.
3408 * Since we want to use almost a whole dtim interval we would also
3409 * like the delay to be for 2-3 dtim intervals, in case there are
3410 * other time events with higher priority.
3411 */
3412 if (vif->bss_conf.assoc) {
3413 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
3414 /* We cannot remain off-channel longer than the DTIM interval */
3415 if (dtim_interval <= req_dur) {
3416 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
3417 if (req_dur <= AUX_ROC_MIN_DURATION)
3418 req_dur = dtim_interval -
3419 AUX_ROC_MIN_SAFETY_BUFFER;
3420 }
3421 }
3422
3423 aux_roc_req.duration = cpu_to_le32(req_dur);
3424 aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
3425
3426 IWL_DEBUG_TE(mvm,
3427 "ROC: Requesting to remain on channel %u for %ums\n",
3428 channel->hw_value, req_dur);
3429 IWL_DEBUG_TE(mvm,
3430 "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
3431 duration, delay, dtim_interval);
3432
3433 /* Set the node address */
3434 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
3435
3436 lockdep_assert_held(&mvm->mutex);
3437
3438 spin_lock_bh(&mvm->time_event_lock);
3439
3440 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
3441 spin_unlock_bh(&mvm->time_event_lock);
3442 return -EIO;
3443 }
3444
3445 te_data->vif = vif;
3446 te_data->duration = duration;
3447 te_data->id = HOT_SPOT_CMD;
3448
3449 spin_unlock_bh(&mvm->time_event_lock);
3450
3451 /*
3452 * Use a notification wait, which really just processes the
3453 * command response and doesn't wait for anything, in order
3454 * to be able to process the response and get the UID inside
3455 * the RX path. Using CMD_WANT_SKB doesn't work because it
3456 * stores the buffer and then wakes up this thread, by which
3457 * time another notification (that the time event started)
3458 * might already have been processed unsuccessfully.
3459 */
3460 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
3461 time_event_response,
3462 ARRAY_SIZE(time_event_response),
3463 iwl_mvm_rx_aux_roc, te_data);
3464
3465 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
3466 &aux_roc_req);
3467
3468 if (res) {
3469 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
3470 iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
3471 goto out_clear_te;
3472 }
3473
3474 /* No need to wait for anything, so just pass 1 (0 isn't valid) */
3475 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
3476 /* should never fail */
3477 WARN_ON_ONCE(res);
3478
3479 if (res) {
3480 out_clear_te:
3481 spin_lock_bh(&mvm->time_event_lock);
3482 iwl_mvm_te_clear_data(mvm, te_data);
3483 spin_unlock_bh(&mvm->time_event_lock);
3484 }
3485
3486 return res;
3487 }
3488
3489 static int iwl_mvm_roc(struct ieee80211_hw *hw,
3490 struct ieee80211_vif *vif,
3491 struct ieee80211_channel *channel,
3492 int duration,
3493 enum ieee80211_roc_type type)
3494 {
3495 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3496 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3497 struct cfg80211_chan_def chandef;
3498 struct iwl_mvm_phy_ctxt *phy_ctxt;
3499 int ret, i;
3500
3501 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
3502 duration, type);
3503
3504 /*
3505 * Flush the done work, just in case it's still pending, so that
3506 * the work it does can complete and we can accept new frames.
3507 */
3508 flush_work(&mvm->roc_done_wk);
3509
3510 mutex_lock(&mvm->mutex);
3511
3512 switch (vif->type) {
3513 case NL80211_IFTYPE_STATION:
3514 if (fw_has_capa(&mvm->fw->ucode_capa,
3515 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
3516 /* Use aux roc framework (HS20) */
3517 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
3518 vif, duration);
3519 goto out_unlock;
3520 }
3521 IWL_ERR(mvm, "hotspot not supported\n");
3522 ret = -EINVAL;
3523 goto out_unlock;
3524 case NL80211_IFTYPE_P2P_DEVICE:
3525 /* handle below */
3526 break;
3527 default:
3528 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
3529 ret = -EINVAL;
3530 goto out_unlock;
3531 }
3532
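/*
 * Look for an existing PHY context (other than the one we are currently
 * bound to) that is already tuned to the requested ROC channel, so the
 * P2P Device can simply be re-bound to it.
 */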
3533 for (i = 0; i < NUM_PHY_CTX; i++) {
3534 phy_ctxt = &mvm->phy_ctxts[i];
3535 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
3536 continue;
3537
3538 if (phy_ctxt->ref && channel == phy_ctxt->channel) {
3539 /*
3540 * Unbind the P2P_DEVICE from the current PHY context,
3541 * and if the PHY context is not used remove it.
3542 */
3543 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3544 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3545 goto out_unlock;
3546
3547 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3548
3549 /* Bind the P2P_DEVICE to the current PHY Context */
3550 mvmvif->phy_ctxt = phy_ctxt;
3551
3552 ret = iwl_mvm_binding_add_vif(mvm, vif);
3553 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3554 goto out_unlock;
3555
3556 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3557 goto schedule_time_event;
3558 }
3559 }
3560
3561 /* Need to update the PHY context only if the ROC channel changed */
3562 if (channel == mvmvif->phy_ctxt->channel)
3563 goto schedule_time_event;
3564
3565 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
3566
3567 /*
3568 * Change the PHY context configuration as it is currently referenced
3569 * only by the P2P Device MAC
3570 */
3571 if (mvmvif->phy_ctxt->ref == 1) {
3572 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
3573 &chandef, 1, 1);
3574 if (ret)
3575 goto out_unlock;
3576 } else {
3577 /*
3578 * The PHY context is shared with other MACs. Need to remove the
3579 * P2P Device from the binding, allocate a new PHY context and
3580 * create a new binding.
3581 */
3582 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3583 if (!phy_ctxt) {
3584 ret = -ENOSPC;
3585 goto out_unlock;
3586 }
3587
3588 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
3589 1, 1);
3590 if (ret) {
3591 IWL_ERR(mvm, "Failed to change PHY context\n");
3592 goto out_unlock;
3593 }
3594
3595 /* Unbind the P2P_DEVICE from the current PHY context */
3596 ret = iwl_mvm_binding_remove_vif(mvm, vif);
3597 if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
3598 goto out_unlock;
3599
3600 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
3601
3602 /* Bind the P2P_DEVICE to the new allocated PHY context */
3603 mvmvif->phy_ctxt = phy_ctxt;
3604
3605 ret = iwl_mvm_binding_add_vif(mvm, vif);
3606 if (WARN(ret, "Failed binding P2P_DEVICE\n"))
3607 goto out_unlock;
3608
3609 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
3610 }
3611
3612 schedule_time_event:
3613 /* Schedule the time events */
3614 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
3615
3616 out_unlock:
3617 mutex_unlock(&mvm->mutex);
3618 IWL_DEBUG_MAC80211(mvm, "leave\n");
3619 return ret;
3620 }
3621
3622 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
3623 {
3624 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3625
3626 IWL_DEBUG_MAC80211(mvm, "enter\n");
3627
3628 mutex_lock(&mvm->mutex);
3629 iwl_mvm_stop_roc(mvm);
3630 mutex_unlock(&mvm->mutex);
3631
3632 IWL_DEBUG_MAC80211(mvm, "leave\n");
3633 return 0;
3634 }
3635
3636 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
3637 struct ieee80211_chanctx_conf *ctx)
3638 {
3639 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3640 struct iwl_mvm_phy_ctxt *phy_ctxt;
3641 int ret;
3642
3643 lockdep_assert_held(&mvm->mutex);
3644
3645 IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
3646
3647 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
3648 if (!phy_ctxt) {
3649 ret = -ENOSPC;
3650 goto out;
3651 }
3652
3653 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3654 ctx->rx_chains_static,
3655 ctx->rx_chains_dynamic);
3656 if (ret) {
3657 IWL_ERR(mvm, "Failed to add PHY context\n");
3658 goto out;
3659 }
3660
3661 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
3662 *phy_ctxt_id = phy_ctxt->id;
3663 out:
3664 return ret;
3665 }
3666
3667 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
3668 struct ieee80211_chanctx_conf *ctx)
3669 {
3670 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3671 int ret;
3672
3673 mutex_lock(&mvm->mutex);
3674 ret = __iwl_mvm_add_chanctx(mvm, ctx);
3675 mutex_unlock(&mvm->mutex);
3676
3677 return ret;
3678 }
3679
3680 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
3681 struct ieee80211_chanctx_conf *ctx)
3682 {
3683 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3684 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3685
3686 lockdep_assert_held(&mvm->mutex);
3687
3688 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
3689 }
3690
3691 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
3692 struct ieee80211_chanctx_conf *ctx)
3693 {
3694 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3695
3696 mutex_lock(&mvm->mutex);
3697 __iwl_mvm_remove_chanctx(mvm, ctx);
3698 mutex_unlock(&mvm->mutex);
3699 }
3700
3701 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
3702 struct ieee80211_chanctx_conf *ctx,
3703 u32 changed)
3704 {
3705 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3706 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3707 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3708
3709 if (WARN_ONCE((phy_ctxt->ref > 1) &&
3710 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
3711 IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
3712 IEEE80211_CHANCTX_CHANGE_RADAR |
3713 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
3714 "Cannot change PHY. Ref=%d, changed=0x%X\n",
3715 phy_ctxt->ref, changed))
3716 return;
3717
3718 mutex_lock(&mvm->mutex);
3719
3720 /* we are only changing the min_width, which may be a no-op */
3721 if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
3722 if (phy_ctxt->width == ctx->min_def.width)
3723 goto out_unlock;
3724
3725 /* we are just toggling between 20_NOHT and 20 */
3726 if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
3727 ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
3728 goto out_unlock;
3729 }
3730
3731 iwl_mvm_bt_coex_vif_change(mvm);
3732 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
3733 ctx->rx_chains_static,
3734 ctx->rx_chains_dynamic);
3735
3736 out_unlock:
3737 mutex_unlock(&mvm->mutex);
3738 }
3739
3740 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
3741 struct ieee80211_vif *vif,
3742 struct ieee80211_chanctx_conf *ctx,
3743 bool switching_chanctx)
3744 {
3745 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
3746 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
3747 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3748 int ret;
3749
3750 lockdep_assert_held(&mvm->mutex);
3751
3752 mvmvif->phy_ctxt = phy_ctxt;
3753
3754 switch (vif->type) {
3755 case NL80211_IFTYPE_AP:
3756 /* only needed if we're switching chanctx (i.e. during CSA) */
3757 if (switching_chanctx) {
3758 mvmvif->ap_ibss_active = true;
3759 break;
3760 }
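/* fall through */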
3761 case NL80211_IFTYPE_ADHOC:
3762 /*
3763 * The AP binding flow is handled as part of the start_ap flow
3764 * (in bss_info_changed), similarly for IBSS.
3765 */
3766 ret = 0;
3767 goto out;
3768 case NL80211_IFTYPE_STATION:
3769 mvmvif->csa_bcn_pending = false;
3770 break;
3771 case NL80211_IFTYPE_MONITOR:
3772 /* always disable PS when a monitor interface is active */
3773 mvmvif->ps_disabled = true;
3774 break;
3775 default:
3776 ret = -EINVAL;
3777 goto out;
3778 }
3779
3780 ret = iwl_mvm_binding_add_vif(mvm, vif);
3781 if (ret)
3782 goto out;
3783
3784 /*
3785 * Power state must be updated before quotas,
3786 * otherwise fw will complain.
3787 */
3788 iwl_mvm_power_update_mac(mvm);
3789
3790 /* Setting the quota at this stage is only required for monitor
3791 * interfaces. For the other types, the bss_info changed flow
3792 * will handle quota settings.
3793 */
3794 if (vif->type == NL80211_IFTYPE_MONITOR) {
3795 mvmvif->monitor_active = true;
3796 ret = iwl_mvm_update_quotas(mvm, false, NULL);
3797 if (ret)
3798 goto out_remove_binding;
3799
3800 ret = iwl_mvm_add_snif_sta(mvm, vif);
3801 if (ret)
3802 goto out_remove_binding;
3803
3804 }
3805
3806 /* Handle binding during CSA */
3807 if (vif->type == NL80211_IFTYPE_AP) {
3808 iwl_mvm_update_quotas(mvm, false, NULL);
3809 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
3810 }
3811
3812 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
3813 u32 duration = 3 * vif->bss_conf.beacon_int;
3814
3815 /* iwl_mvm_protect_session() reads directly from the
3816 * device (the system time), so make sure it is
3817 * available.
3818 */
3819 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
3820 if (ret)
3821 goto out_remove_binding;
3822
3823 /* Protect the session to make sure we hear the first
3824 * beacon on the new channel.
3825 */
3826 mvmvif->csa_bcn_pending = true;
3827 iwl_mvm_protect_session(mvm, vif, duration, duration,
3828 vif->bss_conf.beacon_int / 2,
3829 true);
3830
3831 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
3832
3833 iwl_mvm_update_quotas(mvm, false, NULL);
3834 }
3835
3836 goto out;
3837
3838 out_remove_binding:
3839 iwl_mvm_binding_remove_vif(mvm, vif);
3840 iwl_mvm_power_update_mac(mvm);
3841 out:
3842 if (ret)
3843 mvmvif->phy_ctxt = NULL;
3844 return ret;
3845 }
3846 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
3847 struct ieee80211_vif *vif,
3848 struct ieee80211_chanctx_conf *ctx)
3849 {
3850 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3851 int ret;
3852
3853 mutex_lock(&mvm->mutex);
3854 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
3855 mutex_unlock(&mvm->mutex);
3856
3857 return ret;
3858 }
3859
3860 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
3861 struct ieee80211_vif *vif,
3862 struct ieee80211_chanctx_conf *ctx,
3863 bool switching_chanctx)
3864 {
3865 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3866 struct ieee80211_vif *disabled_vif = NULL;
3867
3868 lockdep_assert_held(&mvm->mutex);
3869
3870 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
3871
3872 switch (vif->type) {
3873 case NL80211_IFTYPE_ADHOC:
3874 goto out;
3875 case NL80211_IFTYPE_MONITOR:
3876 mvmvif->monitor_active = false;
3877 mvmvif->ps_disabled = false;
3878 iwl_mvm_rm_snif_sta(mvm, vif);
3879 break;
3880 case NL80211_IFTYPE_AP:
3881 /* This part is triggered only during CSA */
3882 if (!switching_chanctx || !mvmvif->ap_ibss_active)
3883 goto out;
3884
3885 mvmvif->csa_countdown = false;
3886
3887 /* Set CS bit on all the stations */
3888 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
3889
3890 /* Save the blocked iface; the timeout is set on the next beacon */
3891 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
3892
3893 mvmvif->ap_ibss_active = false;
3894 break;
3895 case NL80211_IFTYPE_STATION:
3896 if (!switching_chanctx)
3897 break;
3898
3899 disabled_vif = vif;
3900
3901 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
3902 break;
3903 default:
3904 break;
3905 }
3906
3907 iwl_mvm_update_quotas(mvm, false, disabled_vif);
3908 iwl_mvm_binding_remove_vif(mvm, vif);
3909
3910 out:
3911 mvmvif->phy_ctxt = NULL;
3912 iwl_mvm_power_update_mac(mvm);
3913 }
3914
3915 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
3916 struct ieee80211_vif *vif,
3917 struct ieee80211_chanctx_conf *ctx)
3918 {
3919 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3920
3921 mutex_lock(&mvm->mutex);
3922 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
3923 mutex_unlock(&mvm->mutex);
3924 }
3925
3926 static int
3927 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
3928 struct ieee80211_vif_chanctx_switch *vifs)
3929 {
3930 int ret;
3931
3932 mutex_lock(&mvm->mutex);
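/*
 * Tear down the old binding and channel context first, then add the new
 * context and re-assign the vif to it; on failure, try to restore the
 * old context, and restart the hw if even that fails.
 */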
3933 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3934 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
3935
3936 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
3937 if (ret) {
3938 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
3939 goto out_reassign;
3940 }
3941
3942 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3943 true);
3944 if (ret) {
3945 IWL_ERR(mvm,
3946 "failed to assign new_ctx during channel switch\n");
3947 goto out_remove;
3948 }
3949
3950 /* we don't support TDLS during DCM - can be caused by channel switch */
3951 if (iwl_mvm_phy_ctx_count(mvm) > 1)
3952 iwl_mvm_teardown_tdls_peers(mvm);
3953
3954 goto out;
3955
3956 out_remove:
3957 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
3958
3959 out_reassign:
3960 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
3961 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
3962 goto out_restart;
3963 }
3964
3965 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
3966 true)) {
3967 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
3968 goto out_restart;
3969 }
3970
3971 goto out;
3972
3973 out_restart:
3974 /* things keep failing, better restart the hw */
3975 iwl_mvm_nic_restart(mvm, false);
3976
3977 out:
3978 mutex_unlock(&mvm->mutex);
3979
3980 return ret;
3981 }
3982
3983 static int
3984 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
3985 struct ieee80211_vif_chanctx_switch *vifs)
3986 {
3987 int ret;
3988
3989 mutex_lock(&mvm->mutex);
3990 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
3991
3992 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
3993 true);
3994 if (ret) {
3995 IWL_ERR(mvm,
3996 "failed to assign new_ctx during channel switch\n");
3997 goto out_reassign;
3998 }
3999
4000 goto out;
4001
4002 out_reassign:
4003 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
4004 true)) {
4005 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
4006 goto out_restart;
4007 }
4008
4009 goto out;
4010
4011 out_restart:
4012 /* things keep failing, better restart the hw */
4013 iwl_mvm_nic_restart(mvm, false);
4014
4015 out:
4016 mutex_unlock(&mvm->mutex);
4017
4018 return ret;
4019 }
4020
4021 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
4022 struct ieee80211_vif_chanctx_switch *vifs,
4023 int n_vifs,
4024 enum ieee80211_chanctx_switch_mode mode)
4025 {
4026 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4027 int ret;
4028
4029 /* we only support a single vif right now */
4030 if (n_vifs > 1)
4031 return -EOPNOTSUPP;
4032
4033 switch (mode) {
4034 case CHANCTX_SWMODE_SWAP_CONTEXTS:
4035 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
4036 break;
4037 case CHANCTX_SWMODE_REASSIGN_VIF:
4038 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
4039 break;
4040 default:
4041 ret = -EOPNOTSUPP;
4042 break;
4043 }
4044
4045 return ret;
4046 }
4047
4048 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
4049 {
4050 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4051
4052 return mvm->ibss_manager;
4053 }
4054
4055 static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
4056 struct ieee80211_sta *sta,
4057 bool set)
4058 {
4059 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4060 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4061
4062 if (!mvm_sta || !mvm_sta->vif) {
4063 IWL_ERR(mvm, "Station is not associated to a vif\n");
4064 return -EINVAL;
4065 }
4066
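/* re-build and upload the beacon template so the updated TIM is sent */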
4067 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
4068 }
4069
4070 #ifdef CONFIG_NL80211_TESTMODE
4071 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
4072 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
4073 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
4074 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
4075 };
4076
4077 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
4078 struct ieee80211_vif *vif,
4079 void *data, int len)
4080 {
4081 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
4082 int err;
4083 u32 noa_duration;
4084
4085 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
4086 NULL);
4087 if (err)
4088 return err;
4089
4090 if (!tb[IWL_MVM_TM_ATTR_CMD])
4091 return -EINVAL;
4092
4093 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
4094 case IWL_MVM_TM_CMD_SET_NOA:
4095 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
4096 !vif->bss_conf.enable_beacon ||
4097 !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
4098 return -EINVAL;
4099
4100 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
4101 if (noa_duration >= vif->bss_conf.beacon_int)
4102 return -EINVAL;
4103
4104 mvm->noa_duration = noa_duration;
4105 mvm->noa_vif = vif;
4106
4107 return iwl_mvm_update_quotas(mvm, true, NULL);
4108 case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
4109 /* must be associated client vif - ignore authorized */
4110 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
4111 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
4112 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
4113 return -EINVAL;
4114
4115 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
4116 return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
4117 return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
4118 }
4119
4120 return -EOPNOTSUPP;
4121 }
4122
4123 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
4124 struct ieee80211_vif *vif,
4125 void *data, int len)
4126 {
4127 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4128 int err;
4129
4130 mutex_lock(&mvm->mutex);
4131 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
4132 mutex_unlock(&mvm->mutex);
4133
4134 return err;
4135 }
4136 #endif
4137
4138 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
4139 struct ieee80211_vif *vif,
4140 struct ieee80211_channel_switch *chsw)
4141 {
4142 /* By implementing this operation, we prevent mac80211 from
4143 * starting its own channel switch timer, so that we can call
4144 * ieee80211_chswitch_done() ourselves at the right time
4145 * (which is when the absence time event starts).
4146 */
4147
4148 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
4149 "dummy channel switch op\n");
4150 }
4151
4152 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
4153 struct ieee80211_vif *vif,
4154 struct ieee80211_channel_switch *chsw)
4155 {
4156 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4157 struct ieee80211_vif *csa_vif;
4158 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4159 u32 apply_time;
4160 int ret;
4161
4162 mutex_lock(&mvm->mutex);
4163
4164 mvmvif->csa_failed = false;
4165
4166 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
4167 chsw->chandef.center_freq1);
4168
4169 iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
4170 ieee80211_vif_to_wdev(vif),
4171 FW_DBG_TRIGGER_CHANNEL_SWITCH);
4172
4173 switch (vif->type) {
4174 case NL80211_IFTYPE_AP:
4175 csa_vif =
4176 rcu_dereference_protected(mvm->csa_vif,
4177 lockdep_is_held(&mvm->mutex));
4178 if (WARN_ONCE(csa_vif && csa_vif->csa_active,
4179 "Another CSA is already in progress")) {
4180 ret = -EBUSY;
4181 goto out_unlock;
4182 }
4183
4184 /* we haven't unblocked tx yet; prevent a new CS meanwhile */
4185 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
4186 lockdep_is_held(&mvm->mutex))) {
4187 ret = -EBUSY;
4188 goto out_unlock;
4189 }
4190
4191 rcu_assign_pointer(mvm->csa_vif, vif);
4192
4193 if (WARN_ONCE(mvmvif->csa_countdown,
4194 "Previous CSA countdown didn't complete")) {
4195 ret = -EBUSY;
4196 goto out_unlock;
4197 }
4198
4199 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
4200
4201 break;
4202 case NL80211_IFTYPE_STATION:
4203 /* Schedule the time event to a bit before beacon 1,
4204 * to make sure we're in the new channel when the
4205 * GO/AP arrives. In case count <= 1, immediately schedule the
4206 * TE (this might result in some packet loss or connection
4207 * loss).
4208 */
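/*
 * beacon_int is in TUs while the device timestamp counts microseconds,
 * hence the *1024 conversion in the computation below.
 */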
4209 if (chsw->count <= 1)
4210 apply_time = 0;
4211 else
4212 apply_time = chsw->device_timestamp +
4213 ((vif->bss_conf.beacon_int * (chsw->count - 1) -
4214 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
4215
4216 if (chsw->block_tx)
4217 iwl_mvm_csa_client_absent(mvm, vif);
4218
4219 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
4220 apply_time);
4221 if (mvmvif->bf_data.bf_enabled) {
4222 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
4223 if (ret)
4224 goto out_unlock;
4225 }
4226
4227 break;
4228 default:
4229 break;
4230 }
4231
4232 mvmvif->ps_disabled = true;
4233
4234 ret = iwl_mvm_power_update_ps(mvm);
4235 if (ret)
4236 goto out_unlock;
4237
4238 /* we won't be on this channel any longer */
4239 iwl_mvm_teardown_tdls_peers(mvm);
4240
4241 out_unlock:
4242 mutex_unlock(&mvm->mutex);
4243
4244 return ret;
4245 }
4246
4247 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
4248 struct ieee80211_vif *vif)
4249 {
4250 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4251 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4252 int ret;
4253
4254 mutex_lock(&mvm->mutex);
4255
4256 if (mvmvif->csa_failed) {
4257 mvmvif->csa_failed = false;
4258 ret = -EIO;
4259 goto out_unlock;
4260 }
4261
4262 if (vif->type == NL80211_IFTYPE_STATION) {
4263 struct iwl_mvm_sta *mvmsta;
4264
4265 mvmvif->csa_bcn_pending = false;
4266 mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
4267 mvmvif->ap_sta_id);
4268
4269 if (WARN_ON(!mvmsta)) {
4270 ret = -EIO;
4271 goto out_unlock;
4272 }
4273
4274 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
4275
4276 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
4277
4278 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
4279 if (ret)
4280 goto out_unlock;
4281
4282 iwl_mvm_stop_session_protection(mvm, vif);
4283 }
4284
4285 mvmvif->ps_disabled = false;
4286
4287 ret = iwl_mvm_power_update_ps(mvm);
4288
4289 out_unlock:
4290 mutex_unlock(&mvm->mutex);
4291
4292 return ret;
4293 }
4294
4295 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
4296 {
4297 int i;
4298
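/*
 * Without the new TX API we can flush by TX queue bitmap; with it,
 * queues are per-station, so flush (or wait) per station instead.
 */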
4299 if (!iwl_mvm_has_new_tx_api(mvm)) {
4300 if (drop) {
4301 mutex_lock(&mvm->mutex);
4302 iwl_mvm_flush_tx_path(mvm,
4303 iwl_mvm_flushable_queues(mvm) & queues, 0);
4304 mutex_unlock(&mvm->mutex);
4305 } else {
4306 iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
4307 }
4308 return;
4309 }
4310
4311 mutex_lock(&mvm->mutex);
4312 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
4313 struct ieee80211_sta *sta;
4314
4315 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
4316 lockdep_is_held(&mvm->mutex));
4317 if (IS_ERR_OR_NULL(sta))
4318 continue;
4319
4320 if (drop)
4321 iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
4322 else
4323 iwl_mvm_wait_sta_queues_empty(mvm,
4324 iwl_mvm_sta_from_mac80211(sta));
4325 }
4326 mutex_unlock(&mvm->mutex);
4327 }
4328
4329 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
4330 struct ieee80211_vif *vif, u32 queues, bool drop)
4331 {
4332 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4333 struct iwl_mvm_vif *mvmvif;
4334 struct iwl_mvm_sta *mvmsta;
4335 struct ieee80211_sta *sta;
4336 int i;
4337 u32 msk = 0;
4338
4339 if (!vif) {
4340 iwl_mvm_flush_no_vif(mvm, queues, drop);
4341 return;
4342 }
4343
4344 if (vif->type != NL80211_IFTYPE_STATION)
4345 return;
4346
4347 /* Make sure we're done with the deferred traffic before flushing */
4348 flush_work(&mvm->add_stream_wk);
4349
4350 mutex_lock(&mvm->mutex);
4351 mvmvif = iwl_mvm_vif_from_mac80211(vif);
4352
4353 /* flush the AP-station and all TDLS peers */
4354 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
4355 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
4356 lockdep_is_held(&mvm->mutex));
4357 if (IS_ERR_OR_NULL(sta))
4358 continue;
4359
4360 mvmsta = iwl_mvm_sta_from_mac80211(sta);
4361 if (mvmsta->vif != vif)
4362 continue;
4363
4364 /* make sure only TDLS peers or the AP are flushed */
4365 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
4366
4367 if (drop) {
4368 if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
4369 IWL_ERR(mvm, "flush request fail\n");
4370 } else {
4371 msk |= mvmsta->tfd_queue_msk;
4372 if (iwl_mvm_has_new_tx_api(mvm))
4373 iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
4374 }
4375 }
4376
4377 mutex_unlock(&mvm->mutex);
4378
4379 /* this can take a while, and we may need/want other operations
4380 * to succeed while doing this, so do it without the mutex held
4381 */
4382 if (!drop && !iwl_mvm_has_new_tx_api(mvm))
4383 iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
4384 }
4385
4386 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
4387 struct survey_info *survey)
4388 {
4389 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4390 int ret;
4391
4392 memset(survey, 0, sizeof(*survey));
4393
4394 /* only support global statistics right now */
4395 if (idx != 0)
4396 return -ENOENT;
4397
4398 if (!fw_has_capa(&mvm->fw->ucode_capa,
4399 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
4400 return -ENOENT;
4401
4402 mutex_lock(&mvm->mutex);
4403
4404 if (iwl_mvm_firmware_running(mvm)) {
4405 ret = iwl_mvm_request_statistics(mvm, false);
4406 if (ret)
4407 goto out;
4408 }
4409
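/* the fw reports radio on-air times in usec; convert them to msec */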
4410 survey->filled = SURVEY_INFO_TIME |
4411 SURVEY_INFO_TIME_RX |
4412 SURVEY_INFO_TIME_TX |
4413 SURVEY_INFO_TIME_SCAN;
4414 survey->time = mvm->accu_radio_stats.on_time_rf +
4415 mvm->radio_stats.on_time_rf;
4416 do_div(survey->time, USEC_PER_MSEC);
4417
4418 survey->time_rx = mvm->accu_radio_stats.rx_time +
4419 mvm->radio_stats.rx_time;
4420 do_div(survey->time_rx, USEC_PER_MSEC);
4421
4422 survey->time_tx = mvm->accu_radio_stats.tx_time +
4423 mvm->radio_stats.tx_time;
4424 do_div(survey->time_tx, USEC_PER_MSEC);
4425
4426 survey->time_scan = mvm->accu_radio_stats.on_time_scan +
4427 mvm->radio_stats.on_time_scan;
4428 do_div(survey->time_scan, USEC_PER_MSEC);
4429
4430 ret = 0;
4431 out:
4432 mutex_unlock(&mvm->mutex);
4433 return ret;
4434 }
4435
4436 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
4437 struct ieee80211_vif *vif,
4438 struct ieee80211_sta *sta,
4439 struct station_info *sinfo)
4440 {
4441 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4442 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4443 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
4444
4445 if (mvmsta->avg_energy) {
4446 sinfo->signal_avg = mvmsta->avg_energy;
4447 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
4448 }
4449
4450 /* if beacon filtering isn't on, mac80211 does it anyway */
4451 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
4452 return;
4453
4454 if (!vif->bss_conf.assoc)
4455 return;
4456
4457 mutex_lock(&mvm->mutex);
4458
4459 if (mvmvif->ap_sta_id != mvmsta->sta_id)
4460 goto unlock;
4461
4462 if (iwl_mvm_request_statistics(mvm, false))
4463 goto unlock;
4464
4465 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
4466 mvmvif->beacon_stats.accu_num_beacons;
4467 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
4468 if (mvmvif->beacon_stats.avg_signal) {
4469 /* firmware only reports a value after RXing a few beacons */
4470 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
4471 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
4472 }
4473 unlock:
4474 mutex_unlock(&mvm->mutex);
4475 }
4476
4477 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
4478 struct ieee80211_vif *vif,
4479 const struct ieee80211_event *event)
4480 {
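/*
 * Fire the debug data collection once the configured occurrence counter
 * for the given MLME event runs out (a zero counter collects on every
 * occurrence).
 */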
4481 #define CHECK_MLME_TRIGGER(_cnt, _fmt...) \
4482 do { \
4483 if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \
4484 break; \
4485 iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \
4486 } while (0)
4487
4488 struct iwl_fw_dbg_trigger_tlv *trig;
4489 struct iwl_fw_dbg_trigger_mlme *trig_mlme;
4490
4491 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
4492 return;
4493
4494 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
4495 trig_mlme = (void *)trig->data;
4496 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
4497 ieee80211_vif_to_wdev(vif), trig))
4498 return;
4499
4500 if (event->u.mlme.data == ASSOC_EVENT) {
4501 if (event->u.mlme.status == MLME_DENIED)
4502 CHECK_MLME_TRIGGER(stop_assoc_denied,
4503 "DENIED ASSOC: reason %d",
4504 event->u.mlme.reason);
4505 else if (event->u.mlme.status == MLME_TIMEOUT)
4506 CHECK_MLME_TRIGGER(stop_assoc_timeout,
4507 "ASSOC TIMEOUT");
4508 } else if (event->u.mlme.data == AUTH_EVENT) {
4509 if (event->u.mlme.status == MLME_DENIED)
4510 CHECK_MLME_TRIGGER(stop_auth_denied,
4511 "DENIED AUTH: reason %d",
4512 event->u.mlme.reason);
4513 else if (event->u.mlme.status == MLME_TIMEOUT)
4514 CHECK_MLME_TRIGGER(stop_auth_timeout,
4515 "AUTH TIMEOUT");
4516 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
4517 CHECK_MLME_TRIGGER(stop_rx_deauth,
4518 "DEAUTH RX %d", event->u.mlme.reason);
4519 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
4520 CHECK_MLME_TRIGGER(stop_tx_deauth,
4521 "DEAUTH TX %d", event->u.mlme.reason);
4522 }
4523 #undef CHECK_MLME_TRIGGER
4524 }
4525
4526 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
4527 struct ieee80211_vif *vif,
4528 const struct ieee80211_event *event)
4529 {
4530 struct iwl_fw_dbg_trigger_tlv *trig;
4531 struct iwl_fw_dbg_trigger_ba *ba_trig;
4532
4533 if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
4534 return;
4535
4536 trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
4537 ba_trig = (void *)trig->data;
4538 if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
4539 ieee80211_vif_to_wdev(vif), trig))
4540 return;
4541
4542 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
4543 return;
4544
4545 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
4546 "BAR received from %pM, tid %d, ssn %d",
4547 event->u.ba.sta->addr, event->u.ba.tid,
4548 event->u.ba.ssn);
4549 }
4550
4551 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
4552 struct ieee80211_vif *vif,
4553 const struct ieee80211_event *event)
4554 {
4555 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4556
4557 switch (event->type) {
4558 case MLME_EVENT:
4559 iwl_mvm_event_mlme_callback(mvm, vif, event);
4560 break;
4561 case BAR_RX_EVENT:
4562 iwl_mvm_event_bar_rx_callback(mvm, vif, event);
4563 break;
4564 case BA_FRAME_TIMEOUT:
4565 iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
4566 event->u.ba.tid);
4567 break;
4568 default:
4569 break;
4570 }
4571 }
4572
4573 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
4574 struct iwl_mvm_internal_rxq_notif *notif,
4575 u32 size)
4576 {
4577 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
4578 int ret;
4579
4580 lockdep_assert_held(&mvm->mutex);
4581
4582 if (!iwl_mvm_has_new_rx_api(mvm))
4583 return;
4584
4585 notif->cookie = mvm->queue_sync_cookie;
4586
4587 if (notif->sync)
4588 atomic_set(&mvm->queue_sync_counter,
4589 mvm->trans->num_rx_queues);
4590
4591 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
4592 if (ret) {
4593 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
4594 goto out;
4595 }
4596
4597 if (notif->sync) {
4598 ret = wait_event_timeout(mvm->rx_sync_waitq,
4599 atomic_read(&mvm->queue_sync_counter) == 0 ||
4600 iwl_mvm_is_radio_killed(mvm),
4601 HZ);
4602 WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
4603 }
4604
4605 out:
4606 atomic_set(&mvm->queue_sync_counter, 0);
4607 mvm->queue_sync_cookie++;
4608 }
4609
4610 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
4611 {
4612 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
4613 struct iwl_mvm_internal_rxq_notif data = {
4614 .type = IWL_MVM_RXQ_EMPTY,
4615 .sync = 1,
4616 };
4617
4618 mutex_lock(&mvm->mutex);
4619 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
4620 mutex_unlock(&mvm->mutex);
4621 }
4622
4623 const struct ieee80211_ops iwl_mvm_hw_ops = {
4624 .tx = iwl_mvm_mac_tx,
4625 .ampdu_action = iwl_mvm_mac_ampdu_action,
4626 .start = iwl_mvm_mac_start,
4627 .reconfig_complete = iwl_mvm_mac_reconfig_complete,
4628 .stop = iwl_mvm_mac_stop,
4629 .add_interface = iwl_mvm_mac_add_interface,
4630 .remove_interface = iwl_mvm_mac_remove_interface,
4631 .config = iwl_mvm_mac_config,
4632 .prepare_multicast = iwl_mvm_prepare_multicast,
4633 .configure_filter = iwl_mvm_configure_filter,
4634 .config_iface_filter = iwl_mvm_config_iface_filter,
4635 .bss_info_changed = iwl_mvm_bss_info_changed,
4636 .hw_scan = iwl_mvm_mac_hw_scan,
4637 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
4638 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
4639 .sta_state = iwl_mvm_mac_sta_state,
4640 .sta_notify = iwl_mvm_mac_sta_notify,
4641 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
4642 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
4643 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
4644 .sta_rc_update = iwl_mvm_sta_rc_update,
4645 .conf_tx = iwl_mvm_mac_conf_tx,
4646 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
4647 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
4648 .flush = iwl_mvm_mac_flush,
4649 .sched_scan_start = iwl_mvm_mac_sched_scan_start,
4650 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
4651 .set_key = iwl_mvm_mac_set_key,
4652 .update_tkip_key = iwl_mvm_mac_update_tkip_key,
4653 .remain_on_channel = iwl_mvm_roc,
4654 .cancel_remain_on_channel = iwl_mvm_cancel_roc,
4655 .add_chanctx = iwl_mvm_add_chanctx,
4656 .remove_chanctx = iwl_mvm_remove_chanctx,
4657 .change_chanctx = iwl_mvm_change_chanctx,
4658 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
4659 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
4660 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
4661
4662 .start_ap = iwl_mvm_start_ap_ibss,
4663 .stop_ap = iwl_mvm_stop_ap_ibss,
4664 .join_ibss = iwl_mvm_start_ap_ibss,
4665 .leave_ibss = iwl_mvm_stop_ap_ibss,
4666
4667 .tx_last_beacon = iwl_mvm_tx_last_beacon,
4668
4669 .set_tim = iwl_mvm_set_tim,
4670
4671 .channel_switch = iwl_mvm_channel_switch,
4672 .pre_channel_switch = iwl_mvm_pre_channel_switch,
4673 .post_channel_switch = iwl_mvm_post_channel_switch,
4674
4675 .tdls_channel_switch = iwl_mvm_tdls_channel_switch,
4676 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
4677 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
4678
4679 .event_callback = iwl_mvm_mac_event_callback,
4680
4681 .sync_rx_queues = iwl_mvm_sync_rx_queues,
4682
4683 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
4684
4685 #ifdef CONFIG_PM_SLEEP
4686 /* look at d3.c */
4687 .suspend = iwl_mvm_suspend,
4688 .resume = iwl_mvm_resume,
4689 .set_wakeup = iwl_mvm_set_wakeup,
4690 .set_rekey_data = iwl_mvm_set_rekey_data,
4691 #if IS_ENABLED(CONFIG_IPV6)
4692 .ipv6_addr_change = iwl_mvm_ipv6_addr_change,
4693 #endif
4694 .set_default_unicast_key = iwl_mvm_set_default_unicast_key,
4695 #endif
4696 .get_survey = iwl_mvm_mac_get_survey,
4697 .sta_statistics = iwl_mvm_mac_sta_statistics,
4698 #ifdef CONFIG_IWLWIFI_DEBUGFS
4699 .sta_add_debugfs = iwl_mvm_sta_add_debugfs,
4700 #endif
4701 };
4702