1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include "mac.h"
19
20 #include <net/cfg80211.h>
21 #include <net/mac80211.h>
22 #include <linux/etherdevice.h>
23 #include <linux/acpi.h>
24
25 #include "hif.h"
26 #include "core.h"
27 #include "debug.h"
28 #include "wmi.h"
29 #include "htt.h"
30 #include "txrx.h"
31 #include "testmode.h"
32 #include "wmi.h"
33 #include "wmi-tlv.h"
34 #include "wmi-ops.h"
35 #include "wow.h"
36
37 /*********/
38 /* Rates */
39 /*********/
40
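/* Per the mac80211 convention for struct ieee80211_rate, .bitrate is given
 * in units of 100 kbps (10 = 1 Mbps, 55 = 5.5 Mbps, 540 = 54 Mbps).
 * .hw_value and .hw_value_short hold the firmware rate codes for the
 * long- and short-preamble CCK variants; OFDM rates have no short-preamble
 * form, so only .hw_value is set for them.
 */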
41 static struct ieee80211_rate ath10k_rates[] = {
42 { .bitrate = 10,
43 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
44 { .bitrate = 20,
45 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
46 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
47 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
48 { .bitrate = 55,
49 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
50 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
51 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
52 { .bitrate = 110,
53 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
54 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
55 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
56
57 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
58 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
59 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
60 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
61 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
62 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
63 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
64 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
65 };
66
67 static struct ieee80211_rate ath10k_rates_rev2[] = {
68 { .bitrate = 10,
69 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
70 { .bitrate = 20,
71 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
72 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
73 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
74 { .bitrate = 55,
75 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
76 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
77 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
78 { .bitrate = 110,
79 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
80 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
81 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
82
83 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
84 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
85 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
86 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
87 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
88 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
89 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
90 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
91 };
92
93 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
94
95 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
96 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
97 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
98 #define ath10k_g_rates (ath10k_rates + 0)
99 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
100
101 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
102 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
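/* The 5 GHz ("a") table reuses the combined array above starting at
 * ATH10K_MAC_FIRST_OFDM_RATE_IDX (4), i.e. it skips the four CCK entries,
 * while the 2.4 GHz ("g") tables expose the whole array from index 0.
 */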
103
104 static bool ath10k_mac_bitrate_is_cck(int bitrate)
105 {
106 switch (bitrate) {
107 case 10:
108 case 20:
109 case 55:
110 case 110:
111 return true;
112 }
113
114 return false;
115 }
116
117 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
118 {
119 return DIV_ROUND_UP(bitrate, 5) |
120 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
121 }
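/* Worked example: for the 5.5 Mbps CCK rate, bitrate = 55, so
 * DIV_ROUND_UP(55, 5) = 11 (the rate in 500 kbps units) and the CCK case
 * ORs in BIT(7), giving 11 | 0x80 = 0x8b.
 */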
122
123 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
124 u8 hw_rate, bool cck)
125 {
126 const struct ieee80211_rate *rate;
127 int i;
128
129 for (i = 0; i < sband->n_bitrates; i++) {
130 rate = &sband->bitrates[i];
131
132 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
133 continue;
134
135 if (rate->hw_value == hw_rate)
136 return i;
137 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
138 rate->hw_value_short == hw_rate)
139 return i;
140 }
141
142 return 0;
143 }
144
145 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
146 u32 bitrate)
147 {
148 int i;
149
150 for (i = 0; i < sband->n_bitrates; i++)
151 if (sband->bitrates[i].bitrate == bitrate)
152 return i;
153
154 return 0;
155 }
156
157 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
158 {
159 switch ((mcs_map >> (2 * nss)) & 0x3) {
160 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
161 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
162 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
163 }
164 return 0;
165 }
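/* The VHT mcs_map packs two bits per spatial stream; e.g. if the field for
 * a given nss is IEEE80211_VHT_MCS_SUPPORT_0_9 the returned mask is
 * BIT(10) - 1 = 0x3ff, i.e. MCS 0..9 are all allowed.
 */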
166
167 static u32
168 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
169 {
170 int nss;
171
172 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
173 if (ht_mcs_mask[nss])
174 return nss + 1;
175
176 return 1;
177 }
178
179 static u32
180 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
181 {
182 int nss;
183
184 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
185 if (vht_mcs_mask[nss])
186 return nss + 1;
187
188 return 1;
189 }
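/* Both helpers above walk the per-stream rate masks from the highest
 * stream down and report the largest NSS that still has any rate enabled;
 * e.g. a VHT mask of { 0x3ff, 0x3ff, 0, ... } yields 2, and an all-zero
 * mask falls back to 1.
 */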
190
191 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
192 {
193 enum wmi_host_platform_type platform_type;
194 int ret;
195
196 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
197 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
198 else
199 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
200
201 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
202
203 if (ret && ret != -EOPNOTSUPP) {
204 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
205 return ret;
206 }
207
208 return 0;
209 }
210
211 /**********/
212 /* Crypto */
213 /**********/
214
215 static int ath10k_send_key(struct ath10k_vif *arvif,
216 struct ieee80211_key_conf *key,
217 enum set_key_cmd cmd,
218 const u8 *macaddr, u32 flags)
219 {
220 struct ath10k *ar = arvif->ar;
221 struct wmi_vdev_install_key_arg arg = {
222 .vdev_id = arvif->vdev_id,
223 .key_idx = key->keyidx,
224 .key_len = key->keylen,
225 .key_data = key->key,
226 .key_flags = flags,
227 .macaddr = macaddr,
228 };
229
230 lockdep_assert_held(&arvif->ar->conf_mutex);
231
232 switch (key->cipher) {
233 case WLAN_CIPHER_SUITE_CCMP:
234 arg.key_cipher = WMI_CIPHER_AES_CCM;
235 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
236 break;
237 case WLAN_CIPHER_SUITE_TKIP:
238 arg.key_cipher = WMI_CIPHER_TKIP;
239 arg.key_txmic_len = 8;
240 arg.key_rxmic_len = 8;
241 break;
242 case WLAN_CIPHER_SUITE_WEP40:
243 case WLAN_CIPHER_SUITE_WEP104:
244 arg.key_cipher = WMI_CIPHER_WEP;
245 break;
246 case WLAN_CIPHER_SUITE_AES_CMAC:
247 WARN_ON(1);
248 return -EINVAL;
249 default:
250 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
251 return -EOPNOTSUPP;
252 }
253
254 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
255 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
256
257 if (cmd == DISABLE_KEY) {
258 arg.key_cipher = WMI_CIPHER_NONE;
259 arg.key_data = NULL;
260 }
261
262 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
263 }
264
265 static int ath10k_install_key(struct ath10k_vif *arvif,
266 struct ieee80211_key_conf *key,
267 enum set_key_cmd cmd,
268 const u8 *macaddr, u32 flags)
269 {
270 struct ath10k *ar = arvif->ar;
271 int ret;
272 unsigned long time_left;
273
274 lockdep_assert_held(&ar->conf_mutex);
275
276 reinit_completion(&ar->install_key_done);
277
278 if (arvif->nohwcrypt)
279 return 1;
280
281 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
282 if (ret)
283 return ret;
284
285 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
286 if (time_left == 0)
287 return -ETIMEDOUT;
288
289 return 0;
290 }
291
292 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
293 const u8 *addr)
294 {
295 struct ath10k *ar = arvif->ar;
296 struct ath10k_peer *peer;
297 int ret;
298 int i;
299 u32 flags;
300
301 lockdep_assert_held(&ar->conf_mutex);
302
303 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
304 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
305 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
306 return -EINVAL;
307
308 spin_lock_bh(&ar->data_lock);
309 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
310 spin_unlock_bh(&ar->data_lock);
311
312 if (!peer)
313 return -ENOENT;
314
315 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
316 if (arvif->wep_keys[i] == NULL)
317 continue;
318
319 switch (arvif->vif->type) {
320 case NL80211_IFTYPE_AP:
321 flags = WMI_KEY_PAIRWISE;
322
323 if (arvif->def_wep_key_idx == i)
324 flags |= WMI_KEY_TX_USAGE;
325
326 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
327 SET_KEY, addr, flags);
328 if (ret < 0)
329 return ret;
330 break;
331 case NL80211_IFTYPE_ADHOC:
332 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
333 SET_KEY, addr,
334 WMI_KEY_PAIRWISE);
335 if (ret < 0)
336 return ret;
337
338 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
339 SET_KEY, addr, WMI_KEY_GROUP);
340 if (ret < 0)
341 return ret;
342 break;
343 default:
344 WARN_ON(1);
345 return -EINVAL;
346 }
347
348 spin_lock_bh(&ar->data_lock);
349 peer->keys[i] = arvif->wep_keys[i];
350 spin_unlock_bh(&ar->data_lock);
351 }
352
353 /* In some cases (notably with static WEP IBSS with multiple keys)
354 * multicast Tx becomes broken. Both pairwise and groupwise keys are
355 * installed already. Using WMI_KEY_TX_USAGE in different combinations
356 * didn't seem to help. Using the def_keyid vdev parameter seems to be
357 * effective so use that.
358 *
359 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
360 */
361 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
362 return 0;
363
364 if (arvif->def_wep_key_idx == -1)
365 return 0;
366
367 ret = ath10k_wmi_vdev_set_param(arvif->ar,
368 arvif->vdev_id,
369 arvif->ar->wmi.vdev_param->def_keyid,
370 arvif->def_wep_key_idx);
371 if (ret) {
372 ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
373 arvif->vdev_id, ret);
374 return ret;
375 }
376
377 return 0;
378 }
379
380 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
381 const u8 *addr)
382 {
383 struct ath10k *ar = arvif->ar;
384 struct ath10k_peer *peer;
385 int first_errno = 0;
386 int ret;
387 int i;
388 u32 flags = 0;
389
390 lockdep_assert_held(&ar->conf_mutex);
391
392 spin_lock_bh(&ar->data_lock);
393 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
394 spin_unlock_bh(&ar->data_lock);
395
396 if (!peer)
397 return -ENOENT;
398
399 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
400 if (peer->keys[i] == NULL)
401 continue;
402
403 /* key flags are not required to delete the key */
404 ret = ath10k_install_key(arvif, peer->keys[i],
405 DISABLE_KEY, addr, flags);
406 if (ret < 0 && first_errno == 0)
407 first_errno = ret;
408
409 if (ret < 0)
410 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
411 i, ret);
412
413 spin_lock_bh(&ar->data_lock);
414 peer->keys[i] = NULL;
415 spin_unlock_bh(&ar->data_lock);
416 }
417
418 return first_errno;
419 }
420
421 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
422 u8 keyidx)
423 {
424 struct ath10k_peer *peer;
425 int i;
426
427 lockdep_assert_held(&ar->data_lock);
428
429 /* We don't know which vdev this peer belongs to,
430 * since WMI doesn't give us that information.
431 *
432 * FIXME: multi-bss needs to be handled.
433 */
434 peer = ath10k_peer_find(ar, 0, addr);
435 if (!peer)
436 return false;
437
438 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
439 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
440 return true;
441 }
442
443 return false;
444 }
445
446 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
447 struct ieee80211_key_conf *key)
448 {
449 struct ath10k *ar = arvif->ar;
450 struct ath10k_peer *peer;
451 u8 addr[ETH_ALEN];
452 int first_errno = 0;
453 int ret;
454 int i;
455 u32 flags = 0;
456
457 lockdep_assert_held(&ar->conf_mutex);
458
459 for (;;) {
460 /* ath10k_install_key() sleeps, so data_lock can't be held across it;
461 * remove the keys incrementally instead
462 */
463 spin_lock_bh(&ar->data_lock);
464 i = 0;
465 list_for_each_entry(peer, &ar->peers, list) {
466 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
467 if (peer->keys[i] == key) {
468 ether_addr_copy(addr, peer->addr);
469 peer->keys[i] = NULL;
470 break;
471 }
472 }
473
474 if (i < ARRAY_SIZE(peer->keys))
475 break;
476 }
477 spin_unlock_bh(&ar->data_lock);
478
479 if (i == ARRAY_SIZE(peer->keys))
480 break;
481 /* key flags are not required to delete the key */
482 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
483 if (ret < 0 && first_errno == 0)
484 first_errno = ret;
485
486 if (ret)
487 ath10k_warn(ar, "failed to remove key for %pM: %d\n",
488 addr, ret);
489 }
490
491 return first_errno;
492 }
493
494 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
495 struct ieee80211_key_conf *key)
496 {
497 struct ath10k *ar = arvif->ar;
498 struct ath10k_peer *peer;
499 int ret;
500
501 lockdep_assert_held(&ar->conf_mutex);
502
503 list_for_each_entry(peer, &ar->peers, list) {
504 if (ether_addr_equal(peer->addr, arvif->vif->addr))
505 continue;
506
507 if (ether_addr_equal(peer->addr, arvif->bssid))
508 continue;
509
510 if (peer->keys[key->keyidx] == key)
511 continue;
512
513 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
514 arvif->vdev_id, key->keyidx);
515
516 ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
517 if (ret) {
518 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
519 arvif->vdev_id, peer->addr, ret);
520 return ret;
521 }
522 }
523
524 return 0;
525 }
526
527 /*********************/
528 /* General utilities */
529 /*********************/
530
531 static inline enum wmi_phy_mode
532 chan_to_phymode(const struct cfg80211_chan_def *chandef)
533 {
534 enum wmi_phy_mode phymode = MODE_UNKNOWN;
535
536 switch (chandef->chan->band) {
537 case NL80211_BAND_2GHZ:
538 switch (chandef->width) {
539 case NL80211_CHAN_WIDTH_20_NOHT:
540 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
541 phymode = MODE_11B;
542 else
543 phymode = MODE_11G;
544 break;
545 case NL80211_CHAN_WIDTH_20:
546 phymode = MODE_11NG_HT20;
547 break;
548 case NL80211_CHAN_WIDTH_40:
549 phymode = MODE_11NG_HT40;
550 break;
551 case NL80211_CHAN_WIDTH_5:
552 case NL80211_CHAN_WIDTH_10:
553 case NL80211_CHAN_WIDTH_80:
554 case NL80211_CHAN_WIDTH_80P80:
555 case NL80211_CHAN_WIDTH_160:
556 phymode = MODE_UNKNOWN;
557 break;
558 }
559 break;
560 case NL80211_BAND_5GHZ:
561 switch (chandef->width) {
562 case NL80211_CHAN_WIDTH_20_NOHT:
563 phymode = MODE_11A;
564 break;
565 case NL80211_CHAN_WIDTH_20:
566 phymode = MODE_11NA_HT20;
567 break;
568 case NL80211_CHAN_WIDTH_40:
569 phymode = MODE_11NA_HT40;
570 break;
571 case NL80211_CHAN_WIDTH_80:
572 phymode = MODE_11AC_VHT80;
573 break;
574 case NL80211_CHAN_WIDTH_160:
575 phymode = MODE_11AC_VHT160;
576 break;
577 case NL80211_CHAN_WIDTH_80P80:
578 phymode = MODE_11AC_VHT80_80;
579 break;
580 case NL80211_CHAN_WIDTH_5:
581 case NL80211_CHAN_WIDTH_10:
582 phymode = MODE_UNKNOWN;
583 break;
584 }
585 break;
586 default:
587 break;
588 }
589
590 WARN_ON(phymode == MODE_UNKNOWN);
591 return phymode;
592 }
593
594 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
595 {
596 /*
597 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
598 * 0 for no restriction
599 * 1 for 1/4 us
600 * 2 for 1/2 us
601 * 3 for 1 us
602 * 4 for 2 us
603 * 5 for 4 us
604 * 6 for 8 us
605 * 7 for 16 us
606 */
607 switch (mpdudensity) {
608 case 0:
609 return 0;
610 case 1:
611 case 2:
612 case 3:
613 /* Our lower layer calculations limit our precision to
614 * 1 microsecond
615 */
616 return 1;
617 case 4:
618 return 2;
619 case 5:
620 return 4;
621 case 6:
622 return 8;
623 case 7:
624 return 16;
625 default:
626 return 0;
627 }
628 }
629
630 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
631 struct cfg80211_chan_def *def)
632 {
633 struct ieee80211_chanctx_conf *conf;
634
635 rcu_read_lock();
636 conf = rcu_dereference(vif->chanctx_conf);
637 if (!conf) {
638 rcu_read_unlock();
639 return -ENOENT;
640 }
641
642 *def = conf->def;
643 rcu_read_unlock();
644
645 return 0;
646 }
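/* vif->chanctx_conf is RCU-protected, hence the rcu_read_lock() above; the
 * chandef is copied by value so the caller can keep using it after the
 * read-side critical section ends.
 */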
647
648 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
649 struct ieee80211_chanctx_conf *conf,
650 void *data)
651 {
652 int *num = data;
653
654 (*num)++;
655 }
656
657 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
658 {
659 int num = 0;
660
661 ieee80211_iter_chan_contexts_atomic(ar->hw,
662 ath10k_mac_num_chanctxs_iter,
663 &num);
664
665 return num;
666 }
667
668 static void
669 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
670 struct ieee80211_chanctx_conf *conf,
671 void *data)
672 {
673 struct cfg80211_chan_def **def = data;
674
675 *def = &conf->def;
676 }
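/* Note: this iterator simply overwrites *def for every active channel
 * context, so the caller ends up with whichever context happened to be
 * visited last. That is sufficient for the monitor vdev, which only needs
 * some valid channel definition to start on.
 */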
677
678 static int ath10k_peer_create(struct ath10k *ar,
679 struct ieee80211_vif *vif,
680 struct ieee80211_sta *sta,
681 u32 vdev_id,
682 const u8 *addr,
683 enum wmi_peer_type peer_type)
684 {
685 struct ath10k_vif *arvif;
686 struct ath10k_peer *peer;
687 int num_peers = 0;
688 int ret;
689
690 lockdep_assert_held(&ar->conf_mutex);
691
692 num_peers = ar->num_peers;
693
694 /* Each vdev consumes a peer entry as well */
695 list_for_each_entry(arvif, &ar->arvifs, list)
696 num_peers++;
697
698 if (num_peers >= ar->max_num_peers)
699 return -ENOBUFS;
700
701 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
702 if (ret) {
703 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
704 addr, vdev_id, ret);
705 return ret;
706 }
707
708 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
709 if (ret) {
710 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
711 addr, vdev_id, ret);
712 return ret;
713 }
714
715 spin_lock_bh(&ar->data_lock);
716
717 peer = ath10k_peer_find(ar, vdev_id, addr);
718 if (!peer) {
719 spin_unlock_bh(&ar->data_lock);
720 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
721 addr, vdev_id);
722 ath10k_wmi_peer_delete(ar, vdev_id, addr);
723 return -ENOENT;
724 }
725
726 peer->vif = vif;
727 peer->sta = sta;
728
729 spin_unlock_bh(&ar->data_lock);
730
731 ar->num_peers++;
732
733 return 0;
734 }
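/* Peer accounting sketch: with N vdevs and M firmware peers already
 * created, the -ENOBUFS check above requires N + M < max_num_peers before
 * creating another peer, since every vdev implicitly consumes one firmware
 * peer entry of its own.
 */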
735
736 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
737 {
738 struct ath10k *ar = arvif->ar;
739 u32 param;
740 int ret;
741
742 param = ar->wmi.pdev_param->sta_kickout_th;
743 ret = ath10k_wmi_pdev_set_param(ar, param,
744 ATH10K_KICKOUT_THRESHOLD);
745 if (ret) {
746 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
747 arvif->vdev_id, ret);
748 return ret;
749 }
750
751 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
752 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
753 ATH10K_KEEPALIVE_MIN_IDLE);
754 if (ret) {
755 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
756 arvif->vdev_id, ret);
757 return ret;
758 }
759
760 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
761 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
762 ATH10K_KEEPALIVE_MAX_IDLE);
763 if (ret) {
764 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
765 arvif->vdev_id, ret);
766 return ret;
767 }
768
769 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
770 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
771 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
772 if (ret) {
773 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
774 arvif->vdev_id, ret);
775 return ret;
776 }
777
778 return 0;
779 }
780
781 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
782 {
783 struct ath10k *ar = arvif->ar;
784 u32 vdev_param;
785
786 vdev_param = ar->wmi.vdev_param->rts_threshold;
787 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
788 }
789
790 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
791 {
792 int ret;
793
794 lockdep_assert_held(&ar->conf_mutex);
795
796 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
797 if (ret)
798 return ret;
799
800 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
801 if (ret)
802 return ret;
803
804 ar->num_peers--;
805
806 return 0;
807 }
808
809 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
810 {
811 struct ath10k_peer *peer, *tmp;
812 int peer_id;
813 int i;
814
815 lockdep_assert_held(&ar->conf_mutex);
816
817 spin_lock_bh(&ar->data_lock);
818 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
819 if (peer->vdev_id != vdev_id)
820 continue;
821
822 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
823 peer->addr, vdev_id);
824
825 for_each_set_bit(peer_id, peer->peer_ids,
826 ATH10K_MAX_NUM_PEER_IDS) {
827 ar->peer_map[peer_id] = NULL;
828 }
829
830 /* Double check that peer is properly un-referenced from
831 * the peer_map
832 */
833 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
834 if (ar->peer_map[i] == peer) {
835 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
836 peer->addr, peer, i);
837 ar->peer_map[i] = NULL;
838 }
839 }
840
841 list_del(&peer->list);
842 kfree(peer);
843 ar->num_peers--;
844 }
845 spin_unlock_bh(&ar->data_lock);
846 }
847
848 static void ath10k_peer_cleanup_all(struct ath10k *ar)
849 {
850 struct ath10k_peer *peer, *tmp;
851 int i;
852
853 lockdep_assert_held(&ar->conf_mutex);
854
855 spin_lock_bh(&ar->data_lock);
856 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
857 list_del(&peer->list);
858 kfree(peer);
859 }
860
861 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
862 ar->peer_map[i] = NULL;
863
864 spin_unlock_bh(&ar->data_lock);
865
866 ar->num_peers = 0;
867 ar->num_stations = 0;
868 }
869
870 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
871 struct ieee80211_sta *sta,
872 enum wmi_tdls_peer_state state)
873 {
874 int ret;
875 struct wmi_tdls_peer_update_cmd_arg arg = {};
876 struct wmi_tdls_peer_capab_arg cap = {};
877 struct wmi_channel_arg chan_arg = {};
878
879 lockdep_assert_held(&ar->conf_mutex);
880
881 arg.vdev_id = vdev_id;
882 arg.peer_state = state;
883 ether_addr_copy(arg.addr, sta->addr);
884
885 cap.peer_max_sp = sta->max_sp;
886 cap.peer_uapsd_queues = sta->uapsd_queues;
887
888 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
889 !sta->tdls_initiator)
890 cap.is_peer_responder = 1;
891
892 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
893 if (ret) {
894 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
895 arg.addr, vdev_id, ret);
896 return ret;
897 }
898
899 return 0;
900 }
901
902 /************************/
903 /* Interface management */
904 /************************/
905
906 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
907 {
908 struct ath10k *ar = arvif->ar;
909
910 lockdep_assert_held(&ar->data_lock);
911
912 if (!arvif->beacon)
913 return;
914
915 if (!arvif->beacon_buf)
916 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
917 arvif->beacon->len, DMA_TO_DEVICE);
918
919 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
920 arvif->beacon_state != ATH10K_BEACON_SENT))
921 return;
922
923 dev_kfree_skb_any(arvif->beacon);
924
925 arvif->beacon = NULL;
926 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
927 }
928
929 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
930 {
931 struct ath10k *ar = arvif->ar;
932
933 lockdep_assert_held(&ar->data_lock);
934
935 ath10k_mac_vif_beacon_free(arvif);
936
937 if (arvif->beacon_buf) {
938 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
939 arvif->beacon_buf, arvif->beacon_paddr);
940 arvif->beacon_buf = NULL;
941 }
942 }
943
944 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
945 {
946 unsigned long time_left;
947
948 lockdep_assert_held(&ar->conf_mutex);
949
950 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
951 return -ESHUTDOWN;
952
953 time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
954 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
955 if (time_left == 0)
956 return -ETIMEDOUT;
957
958 return ar->last_wmi_vdev_start_status;
959 }
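/* Callers reinit_completion(&ar->vdev_setup_done) before issuing the WMI
 * vdev start/restart/stop command and then call this helper to wait for
 * the corresponding completion; the value returned is whatever firmware
 * status was last recorded in last_wmi_vdev_start_status.
 */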
960
961 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
962 {
963 struct cfg80211_chan_def *chandef = NULL;
964 struct ieee80211_channel *channel = NULL;
965 struct wmi_vdev_start_request_arg arg = {};
966 int ret = 0;
967
968 lockdep_assert_held(&ar->conf_mutex);
969
970 ieee80211_iter_chan_contexts_atomic(ar->hw,
971 ath10k_mac_get_any_chandef_iter,
972 &chandef);
973 if (WARN_ON_ONCE(!chandef))
974 return -ENOENT;
975
976 channel = chandef->chan;
977
978 arg.vdev_id = vdev_id;
979 arg.channel.freq = channel->center_freq;
980 arg.channel.band_center_freq1 = chandef->center_freq1;
981 arg.channel.band_center_freq2 = chandef->center_freq2;
982
983 /* TODO: set this up dynamically; what if we
984 * don't have any vifs?
985 */
986 arg.channel.mode = chan_to_phymode(chandef);
987 arg.channel.chan_radar =
988 !!(channel->flags & IEEE80211_CHAN_RADAR);
989
990 arg.channel.min_power = 0;
991 arg.channel.max_power = channel->max_power * 2;
992 arg.channel.max_reg_power = channel->max_reg_power * 2;
993 arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
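/* The doubling above suggests the WMI channel power fields are in 0.5 dBm
 * steps while mac80211 reports whole dBm; this is an inference from the
 * scaling used here and in ath10k_vdev_start_restart(), not something
 * spelled out locally.
 */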
994
995 reinit_completion(&ar->vdev_setup_done);
996
997 ret = ath10k_wmi_vdev_start(ar, &arg);
998 if (ret) {
999 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
1000 vdev_id, ret);
1001 return ret;
1002 }
1003
1004 ret = ath10k_vdev_setup_sync(ar);
1005 if (ret) {
1006 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
1007 vdev_id, ret);
1008 return ret;
1009 }
1010
1011 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1012 if (ret) {
1013 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1014 vdev_id, ret);
1015 goto vdev_stop;
1016 }
1017
1018 ar->monitor_vdev_id = vdev_id;
1019
1020 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1021 ar->monitor_vdev_id);
1022 return 0;
1023
1024 vdev_stop:
1025 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1026 if (ret)
1027 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1028 ar->monitor_vdev_id, ret);
1029
1030 return ret;
1031 }
1032
1033 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1034 {
1035 int ret = 0;
1036
1037 lockdep_assert_held(&ar->conf_mutex);
1038
1039 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1040 if (ret)
1041 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1042 ar->monitor_vdev_id, ret);
1043
1044 reinit_completion(&ar->vdev_setup_done);
1045
1046 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1047 if (ret)
1048 ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
1049 ar->monitor_vdev_id, ret);
1050
1051 ret = ath10k_vdev_setup_sync(ar);
1052 if (ret)
1053 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1054 ar->monitor_vdev_id, ret);
1055
1056 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1057 ar->monitor_vdev_id);
1058 return ret;
1059 }
1060
1061 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1062 {
1063 int bit, ret = 0;
1064
1065 lockdep_assert_held(&ar->conf_mutex);
1066
1067 if (ar->free_vdev_map == 0) {
1068 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1069 return -ENOMEM;
1070 }
1071
1072 bit = __ffs64(ar->free_vdev_map);
1073
1074 ar->monitor_vdev_id = bit;
1075
1076 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1077 WMI_VDEV_TYPE_MONITOR,
1078 0, ar->mac_addr);
1079 if (ret) {
1080 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1081 ar->monitor_vdev_id, ret);
1082 return ret;
1083 }
1084
1085 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1086 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1087 ar->monitor_vdev_id);
1088
1089 return 0;
1090 }
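/* free_vdev_map is a 64-bit bitmap of unused vdev ids: __ffs64() picks the
 * lowest free id, the bit is cleared above once the monitor vdev exists,
 * and ath10k_monitor_vdev_delete() sets it again on teardown.
 */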
1091
1092 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1093 {
1094 int ret = 0;
1095
1096 lockdep_assert_held(&ar->conf_mutex);
1097
1098 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1099 if (ret) {
1100 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1101 ar->monitor_vdev_id, ret);
1102 return ret;
1103 }
1104
1105 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1106
1107 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1108 ar->monitor_vdev_id);
1109 return ret;
1110 }
1111
1112 static int ath10k_monitor_start(struct ath10k *ar)
1113 {
1114 int ret;
1115
1116 lockdep_assert_held(&ar->conf_mutex);
1117
1118 ret = ath10k_monitor_vdev_create(ar);
1119 if (ret) {
1120 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1121 return ret;
1122 }
1123
1124 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1125 if (ret) {
1126 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1127 ath10k_monitor_vdev_delete(ar);
1128 return ret;
1129 }
1130
1131 ar->monitor_started = true;
1132 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1133
1134 return 0;
1135 }
1136
1137 static int ath10k_monitor_stop(struct ath10k *ar)
1138 {
1139 int ret;
1140
1141 lockdep_assert_held(&ar->conf_mutex);
1142
1143 ret = ath10k_monitor_vdev_stop(ar);
1144 if (ret) {
1145 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1146 return ret;
1147 }
1148
1149 ret = ath10k_monitor_vdev_delete(ar);
1150 if (ret) {
1151 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1152 return ret;
1153 }
1154
1155 ar->monitor_started = false;
1156 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1157
1158 return 0;
1159 }
1160
1161 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1162 {
1163 int num_ctx;
1164
1165 /* At least one chanctx is required to derive a channel to start
1166 * monitor vdev on.
1167 */
1168 num_ctx = ath10k_mac_num_chanctxs(ar);
1169 if (num_ctx == 0)
1170 return false;
1171
1172 /* If there's already an existing special monitor interface then don't
1173 * bother creating another monitor vdev.
1174 */
1175 if (ar->monitor_arvif)
1176 return false;
1177
1178 return ar->monitor ||
1179 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
1180 ar->running_fw->fw_file.fw_features) &&
1181 (ar->filter_flags & FIF_OTHER_BSS)) ||
1182 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1183 }
1184
1185 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1186 {
1187 int num_ctx;
1188
1189 num_ctx = ath10k_mac_num_chanctxs(ar);
1190
1191 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1192 * shouldn't allow this, but guard against it anyway since
1193 * multi-channel DFS hasn't been tested at all.
1194 */
1195 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1196 return false;
1197
1198 return true;
1199 }
1200
1201 static int ath10k_monitor_recalc(struct ath10k *ar)
1202 {
1203 bool needed;
1204 bool allowed;
1205 int ret;
1206
1207 lockdep_assert_held(&ar->conf_mutex);
1208
1209 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1210 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1211
1212 ath10k_dbg(ar, ATH10K_DBG_MAC,
1213 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1214 ar->monitor_started, needed, allowed);
1215
1216 if (WARN_ON(needed && !allowed)) {
1217 if (ar->monitor_started) {
1218 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1219
1220 ret = ath10k_monitor_stop(ar);
1221 if (ret)
1222 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1223 ret);
1224 /* not serious */
1225 }
1226
1227 return -EPERM;
1228 }
1229
1230 if (needed == ar->monitor_started)
1231 return 0;
1232
1233 if (needed)
1234 return ath10k_monitor_start(ar);
1235 else
1236 return ath10k_monitor_stop(ar);
1237 }
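/* In short: if a monitor vdev is needed but not allowed, bail out (stopping
 * any monitor that is already running); if the needed state already matches
 * monitor_started, do nothing; otherwise start or stop the monitor vdev to
 * converge on the needed state.
 */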
1238
1239 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
1240 {
1241 struct ath10k *ar = arvif->ar;
1242
1243 lockdep_assert_held(&ar->conf_mutex);
1244
1245 if (!arvif->is_started) {
1246 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
1247 return false;
1248 }
1249
1250 return true;
1251 }
1252
1253 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
1254 {
1255 struct ath10k *ar = arvif->ar;
1256 u32 vdev_param;
1257
1258 lockdep_assert_held(&ar->conf_mutex);
1259
1260 vdev_param = ar->wmi.vdev_param->protection_mode;
1261
1262 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
1263 arvif->vdev_id, arvif->use_cts_prot);
1264
1265 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1266 arvif->use_cts_prot ? 1 : 0);
1267 }
1268
1269 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1270 {
1271 struct ath10k *ar = arvif->ar;
1272 u32 vdev_param, rts_cts = 0;
1273
1274 lockdep_assert_held(&ar->conf_mutex);
1275
1276 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1277
1278 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1279
1280 if (arvif->num_legacy_stations > 0)
1281 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1282 WMI_RTSCTS_PROFILE);
1283 else
1284 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1285 WMI_RTSCTS_PROFILE);
1286
1287 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
1288 arvif->vdev_id, rts_cts);
1289
1290 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1291 rts_cts);
1292 }
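/* Rough intent of the profile selection above: with legacy (non-HT)
 * stations associated, RTS/CTS is kept across software retries; otherwise
 * it is only applied to the second rate series, presumably to reduce
 * protection overhead when every peer is HT/VHT capable.
 */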
1293
1294 static int ath10k_start_cac(struct ath10k *ar)
1295 {
1296 int ret;
1297
1298 lockdep_assert_held(&ar->conf_mutex);
1299
1300 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1301
1302 ret = ath10k_monitor_recalc(ar);
1303 if (ret) {
1304 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1305 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1306 return ret;
1307 }
1308
1309 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1310 ar->monitor_vdev_id);
1311
1312 return 0;
1313 }
1314
1315 static int ath10k_stop_cac(struct ath10k *ar)
1316 {
1317 lockdep_assert_held(&ar->conf_mutex);
1318
1319 /* CAC is not running - do nothing */
1320 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1321 return 0;
1322
1323 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1324 ath10k_monitor_stop(ar);
1325
1326 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1327
1328 return 0;
1329 }
1330
1331 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1332 struct ieee80211_chanctx_conf *conf,
1333 void *data)
1334 {
1335 bool *ret = data;
1336
1337 if (!*ret && conf->radar_enabled)
1338 *ret = true;
1339 }
1340
1341 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1342 {
1343 bool has_radar = false;
1344
1345 ieee80211_iter_chan_contexts_atomic(ar->hw,
1346 ath10k_mac_has_radar_iter,
1347 &has_radar);
1348
1349 return has_radar;
1350 }
1351
1352 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1353 {
1354 int ret;
1355
1356 lockdep_assert_held(&ar->conf_mutex);
1357
1358 ath10k_stop_cac(ar);
1359
1360 if (!ath10k_mac_has_radar_enabled(ar))
1361 return;
1362
1363 if (ar->num_started_vdevs > 0)
1364 return;
1365
1366 ret = ath10k_start_cac(ar);
1367 if (ret) {
1368 /*
1369 * It is not possible to start CAC on the current channel, so
1370 * transmitting is not allowed. Make this channel DFS_UNAVAILABLE
1371 * by indicating that radar was detected.
1372 */
1373 ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1374 ieee80211_radar_detected(ar->hw);
1375 }
1376 }
1377
1378 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1379 {
1380 struct ath10k *ar = arvif->ar;
1381 int ret;
1382
1383 lockdep_assert_held(&ar->conf_mutex);
1384
1385 reinit_completion(&ar->vdev_setup_done);
1386
1387 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1388 if (ret) {
1389 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1390 arvif->vdev_id, ret);
1391 return ret;
1392 }
1393
1394 ret = ath10k_vdev_setup_sync(ar);
1395 if (ret) {
1396 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1397 arvif->vdev_id, ret);
1398 return ret;
1399 }
1400
1401 WARN_ON(ar->num_started_vdevs == 0);
1402
1403 if (ar->num_started_vdevs != 0) {
1404 ar->num_started_vdevs--;
1405 ath10k_recalc_radar_detection(ar);
1406 }
1407
1408 return ret;
1409 }
1410
1411 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1412 const struct cfg80211_chan_def *chandef,
1413 bool restart)
1414 {
1415 struct ath10k *ar = arvif->ar;
1416 struct wmi_vdev_start_request_arg arg = {};
1417 int ret = 0;
1418
1419 lockdep_assert_held(&ar->conf_mutex);
1420
1421 reinit_completion(&ar->vdev_setup_done);
1422
1423 arg.vdev_id = arvif->vdev_id;
1424 arg.dtim_period = arvif->dtim_period;
1425 arg.bcn_intval = arvif->beacon_interval;
1426
1427 arg.channel.freq = chandef->chan->center_freq;
1428 arg.channel.band_center_freq1 = chandef->center_freq1;
1429 arg.channel.band_center_freq2 = chandef->center_freq2;
1430 arg.channel.mode = chan_to_phymode(chandef);
1431
1432 arg.channel.min_power = 0;
1433 arg.channel.max_power = chandef->chan->max_power * 2;
1434 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1435 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1436
1437 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1438 arg.ssid = arvif->u.ap.ssid;
1439 arg.ssid_len = arvif->u.ap.ssid_len;
1440 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1441
1442 /* For now allow DFS for AP mode */
1443 arg.channel.chan_radar =
1444 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1445 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1446 arg.ssid = arvif->vif->bss_conf.ssid;
1447 arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1448 }
1449
1450 ath10k_dbg(ar, ATH10K_DBG_MAC,
1451 "mac vdev %d start center_freq %d phymode %s\n",
1452 arg.vdev_id, arg.channel.freq,
1453 ath10k_wmi_phymode_str(arg.channel.mode));
1454
1455 if (restart)
1456 ret = ath10k_wmi_vdev_restart(ar, &arg);
1457 else
1458 ret = ath10k_wmi_vdev_start(ar, &arg);
1459
1460 if (ret) {
1461 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1462 arg.vdev_id, ret);
1463 return ret;
1464 }
1465
1466 ret = ath10k_vdev_setup_sync(ar);
1467 if (ret) {
1468 ath10k_warn(ar,
1469 "failed to synchronize setup for vdev %i restart %d: %d\n",
1470 arg.vdev_id, restart, ret);
1471 return ret;
1472 }
1473
1474 ar->num_started_vdevs++;
1475 ath10k_recalc_radar_detection(ar);
1476
1477 return ret;
1478 }
1479
1480 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1481 const struct cfg80211_chan_def *def)
1482 {
1483 return ath10k_vdev_start_restart(arvif, def, false);
1484 }
1485
1486 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1487 const struct cfg80211_chan_def *def)
1488 {
1489 return ath10k_vdev_start_restart(arvif, def, true);
1490 }
1491
1492 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1493 struct sk_buff *bcn)
1494 {
1495 struct ath10k *ar = arvif->ar;
1496 struct ieee80211_mgmt *mgmt;
1497 const u8 *p2p_ie;
1498 int ret;
1499
1500 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1501 return 0;
1502
1503 mgmt = (void *)bcn->data;
1504 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1505 mgmt->u.beacon.variable,
1506 bcn->len - (mgmt->u.beacon.variable -
1507 bcn->data));
1508 if (!p2p_ie)
1509 return -ENOENT;
1510
1511 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1512 if (ret) {
1513 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1514 arvif->vdev_id, ret);
1515 return ret;
1516 }
1517
1518 return 0;
1519 }
1520
1521 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1522 u8 oui_type, size_t ie_offset)
1523 {
1524 size_t len;
1525 const u8 *next;
1526 const u8 *end;
1527 u8 *ie;
1528
1529 if (WARN_ON(skb->len < ie_offset))
1530 return -EINVAL;
1531
1532 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1533 skb->data + ie_offset,
1534 skb->len - ie_offset);
1535 if (!ie)
1536 return -ENOENT;
1537
1538 len = ie[1] + 2;
1539 end = skb->data + skb->len;
1540 next = ie + len;
1541
1542 if (WARN_ON(next > end))
1543 return -EINVAL;
1544
1545 memmove(ie, next, end - next);
1546 skb_trim(skb, skb->len - len);
1547
1548 return 0;
1549 }
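/* Example of the arithmetic above: vendor IEs are TLV-encoded, so
 * ie[1] + 2 covers the element ID and length octets plus the payload.
 * memmove() slides the rest of the frame over the IE and skb_trim() drops
 * the now-duplicated tail bytes.
 */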
1550
1551 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1552 {
1553 struct ath10k *ar = arvif->ar;
1554 struct ieee80211_hw *hw = ar->hw;
1555 struct ieee80211_vif *vif = arvif->vif;
1556 struct ieee80211_mutable_offsets offs = {};
1557 struct sk_buff *bcn;
1558 int ret;
1559
1560 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1561 return 0;
1562
1563 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1564 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1565 return 0;
1566
1567 bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1568 if (!bcn) {
1569 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1570 return -EPERM;
1571 }
1572
1573 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1574 if (ret) {
1575 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1576 kfree_skb(bcn);
1577 return ret;
1578 }
1579
1580 /* P2P IE is inserted by firmware automatically (as configured above)
1581 * so remove it from the base beacon template to avoid duplicate P2P
1582 * IEs in beacon frames.
1583 */
1584 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1585 offsetof(struct ieee80211_mgmt,
1586 u.beacon.variable));
1587
1588 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1589 0, NULL, 0);
1590 kfree_skb(bcn);
1591
1592 if (ret) {
1593 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1594 ret);
1595 return ret;
1596 }
1597
1598 return 0;
1599 }
1600
1601 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1602 {
1603 struct ath10k *ar = arvif->ar;
1604 struct ieee80211_hw *hw = ar->hw;
1605 struct ieee80211_vif *vif = arvif->vif;
1606 struct sk_buff *prb;
1607 int ret;
1608
1609 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1610 return 0;
1611
1612 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1613 return 0;
1614
1615 /* For mesh, probe response and beacon share the same template */
1616 if (ieee80211_vif_is_mesh(vif))
1617 return 0;
1618
1619 prb = ieee80211_proberesp_get(hw, vif);
1620 if (!prb) {
1621 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1622 return -EPERM;
1623 }
1624
1625 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1626 kfree_skb(prb);
1627
1628 if (ret) {
1629 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1630 ret);
1631 return ret;
1632 }
1633
1634 return 0;
1635 }
1636
1637 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1638 {
1639 struct ath10k *ar = arvif->ar;
1640 struct cfg80211_chan_def def;
1641 int ret;
1642
1643 /* When the vdev is originally started during assign_vif_chanctx() some
1644 * information is missing, notably SSID. Firmware revisions with beacon
1645 * offloading require the SSID to be provided during vdev (re)start to
1646 * handle hidden SSID properly.
1647 *
1648 * Vdev restart must be done after vdev has been both started and
1649 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1650 * deliver vdev restart response event causing timeouts during vdev
1651 * syncing in ath10k.
1652 *
1653 * Note: The vdev down/up and template reinstallation could be skipped
1654 * since only wmi-tlv firmware is known to have beacon offload and
1655 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1656 * response delivery. It's probably more robust to keep it as is.
1657 */
1658 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1659 return 0;
1660
1661 if (WARN_ON(!arvif->is_started))
1662 return -EINVAL;
1663
1664 if (WARN_ON(!arvif->is_up))
1665 return -EINVAL;
1666
1667 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1668 return -EINVAL;
1669
1670 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1671 if (ret) {
1672 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1673 arvif->vdev_id, ret);
1674 return ret;
1675 }
1676
1677 /* Vdev down resets the beacon & probe response templates. Reinstall
1678 * them, otherwise the firmware will crash upon vdev up.
1679 */
1680
1681 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1682 if (ret) {
1683 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1684 return ret;
1685 }
1686
1687 ret = ath10k_mac_setup_prb_tmpl(arvif);
1688 if (ret) {
1689 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1690 return ret;
1691 }
1692
1693 ret = ath10k_vdev_restart(arvif, &def);
1694 if (ret) {
1695 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1696 arvif->vdev_id, ret);
1697 return ret;
1698 }
1699
1700 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1701 arvif->bssid);
1702 if (ret) {
1703 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1704 arvif->vdev_id, ret);
1705 return ret;
1706 }
1707
1708 return 0;
1709 }
1710
1711 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1712 struct ieee80211_bss_conf *info)
1713 {
1714 struct ath10k *ar = arvif->ar;
1715 int ret = 0;
1716
1717 lockdep_assert_held(&arvif->ar->conf_mutex);
1718
1719 if (!info->enable_beacon) {
1720 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1721 if (ret)
1722 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1723 arvif->vdev_id, ret);
1724
1725 arvif->is_up = false;
1726
1727 spin_lock_bh(&arvif->ar->data_lock);
1728 ath10k_mac_vif_beacon_free(arvif);
1729 spin_unlock_bh(&arvif->ar->data_lock);
1730
1731 return;
1732 }
1733
1734 arvif->tx_seq_no = 0x1000;
1735
1736 arvif->aid = 0;
1737 ether_addr_copy(arvif->bssid, info->bssid);
1738
1739 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1740 arvif->bssid);
1741 if (ret) {
1742 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1743 arvif->vdev_id, ret);
1744 return;
1745 }
1746
1747 arvif->is_up = true;
1748
1749 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1750 if (ret) {
1751 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1752 arvif->vdev_id, ret);
1753 return;
1754 }
1755
1756 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1757 }
1758
1759 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1760 struct ieee80211_bss_conf *info,
1761 const u8 self_peer[ETH_ALEN])
1762 {
1763 struct ath10k *ar = arvif->ar;
1764 u32 vdev_param;
1765 int ret = 0;
1766
1767 lockdep_assert_held(&arvif->ar->conf_mutex);
1768
1769 if (!info->ibss_joined) {
1770 if (is_zero_ether_addr(arvif->bssid))
1771 return;
1772
1773 eth_zero_addr(arvif->bssid);
1774
1775 return;
1776 }
1777
1778 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1779 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1780 ATH10K_DEFAULT_ATIM);
1781 if (ret)
1782 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1783 arvif->vdev_id, ret);
1784 }
1785
1786 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1787 {
1788 struct ath10k *ar = arvif->ar;
1789 u32 param;
1790 u32 value;
1791 int ret;
1792
1793 lockdep_assert_held(&arvif->ar->conf_mutex);
1794
1795 if (arvif->u.sta.uapsd)
1796 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1797 else
1798 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1799
1800 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1801 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1802 if (ret) {
1803 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1804 value, arvif->vdev_id, ret);
1805 return ret;
1806 }
1807
1808 return 0;
1809 }
1810
1811 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1812 {
1813 struct ath10k *ar = arvif->ar;
1814 u32 param;
1815 u32 value;
1816 int ret;
1817
1818 lockdep_assert_held(&arvif->ar->conf_mutex);
1819
1820 if (arvif->u.sta.uapsd)
1821 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1822 else
1823 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1824
1825 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1826 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1827 param, value);
1828 if (ret) {
1829 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1830 value, arvif->vdev_id, ret);
1831 return ret;
1832 }
1833
1834 return 0;
1835 }
1836
1837 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1838 {
1839 struct ath10k_vif *arvif;
1840 int num = 0;
1841
1842 lockdep_assert_held(&ar->conf_mutex);
1843
1844 list_for_each_entry(arvif, &ar->arvifs, list)
1845 if (arvif->is_started)
1846 num++;
1847
1848 return num;
1849 }
1850
1851 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1852 {
1853 struct ath10k *ar = arvif->ar;
1854 struct ieee80211_vif *vif = arvif->vif;
1855 struct ieee80211_conf *conf = &ar->hw->conf;
1856 enum wmi_sta_powersave_param param;
1857 enum wmi_sta_ps_mode psmode;
1858 int ret;
1859 int ps_timeout;
1860 bool enable_ps;
1861
1862 lockdep_assert_held(&arvif->ar->conf_mutex);
1863
1864 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1865 return 0;
1866
1867 enable_ps = arvif->ps;
1868
1869 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1870 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1871 ar->running_fw->fw_file.fw_features)) {
1872 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1873 arvif->vdev_id);
1874 enable_ps = false;
1875 }
1876
1877 if (!arvif->is_started) {
1878 /* mac80211 can update vif powersave state while disconnected.
1879 * Firmware doesn't behave nicely and consumes more power than
1880 * necessary if PS is disabled on a non-started vdev. Hence
1881 * force-enable PS for non-running vdevs.
1882 */
1883 psmode = WMI_STA_PS_MODE_ENABLED;
1884 } else if (enable_ps) {
1885 psmode = WMI_STA_PS_MODE_ENABLED;
1886 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1887
1888 ps_timeout = conf->dynamic_ps_timeout;
1889 if (ps_timeout == 0) {
1890 /* Firmware doesn't like 0 */
1891 ps_timeout = ieee80211_tu_to_usec(
1892 vif->bss_conf.beacon_int) / 1000;
1893 }
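/* The fallback above is one beacon interval expressed in milliseconds:
 * beacon_int (in TU) -> microseconds via ieee80211_tu_to_usec() -> / 1000.
 */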
1894
1895 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1896 ps_timeout);
1897 if (ret) {
1898 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1899 arvif->vdev_id, ret);
1900 return ret;
1901 }
1902 } else {
1903 psmode = WMI_STA_PS_MODE_DISABLED;
1904 }
1905
1906 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1907 arvif->vdev_id, psmode ? "enable" : "disable");
1908
1909 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1910 if (ret) {
1911 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1912 psmode, arvif->vdev_id, ret);
1913 return ret;
1914 }
1915
1916 return 0;
1917 }
1918
1919 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1920 {
1921 struct ath10k *ar = arvif->ar;
1922 struct wmi_sta_keepalive_arg arg = {};
1923 int ret;
1924
1925 lockdep_assert_held(&arvif->ar->conf_mutex);
1926
1927 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1928 return 0;
1929
1930 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1931 return 0;
1932
1933 /* Some firmware revisions have a bug and ignore the `enabled` field.
1934 * Instead use the interval to disable the keepalive.
1935 */
1936 arg.vdev_id = arvif->vdev_id;
1937 arg.enabled = 1;
1938 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1939 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1940
1941 ret = ath10k_wmi_sta_keepalive(ar, &arg);
1942 if (ret) {
1943 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1944 arvif->vdev_id, ret);
1945 return ret;
1946 }
1947
1948 return 0;
1949 }
1950
1951 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1952 {
1953 struct ath10k *ar = arvif->ar;
1954 struct ieee80211_vif *vif = arvif->vif;
1955 int ret;
1956
1957 lockdep_assert_held(&arvif->ar->conf_mutex);
1958
1959 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1960 return;
1961
1962 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1963 return;
1964
1965 if (!vif->csa_active)
1966 return;
1967
1968 if (!arvif->is_up)
1969 return;
1970
1971 if (!ieee80211_csa_is_complete(vif)) {
1972 ieee80211_csa_update_counter(vif);
1973
1974 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1975 if (ret)
1976 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1977 ret);
1978
1979 ret = ath10k_mac_setup_prb_tmpl(arvif);
1980 if (ret)
1981 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1982 ret);
1983 } else {
1984 ieee80211_csa_finish(vif);
1985 }
1986 }
1987
1988 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1989 {
1990 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1991 ap_csa_work);
1992 struct ath10k *ar = arvif->ar;
1993
1994 mutex_lock(&ar->conf_mutex);
1995 ath10k_mac_vif_ap_csa_count_down(arvif);
1996 mutex_unlock(&ar->conf_mutex);
1997 }
1998
1999 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
2000 struct ieee80211_vif *vif)
2001 {
2002 struct sk_buff *skb = data;
2003 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2004 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2005
2006 if (vif->type != NL80211_IFTYPE_STATION)
2007 return;
2008
2009 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
2010 return;
2011
2012 cancel_delayed_work(&arvif->connection_loss_work);
2013 }
2014
2015 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
2016 {
2017 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2018 IEEE80211_IFACE_ITER_NORMAL,
2019 ath10k_mac_handle_beacon_iter,
2020 skb);
2021 }
2022
2023 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
2024 struct ieee80211_vif *vif)
2025 {
2026 u32 *vdev_id = data;
2027 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2028 struct ath10k *ar = arvif->ar;
2029 struct ieee80211_hw *hw = ar->hw;
2030
2031 if (arvif->vdev_id != *vdev_id)
2032 return;
2033
2034 if (!arvif->is_up)
2035 return;
2036
2037 ieee80211_beacon_loss(vif);
2038
2039 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
2040 * (done by mac80211) succeeds but beacons do not resume then it
2041 * doesn't make sense to continue operation. Queue connection loss work
2042 * which can be cancelled when a beacon is received.
2043 */
2044 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
2045 ATH10K_CONNECTION_LOSS_HZ);
2046 }
2047
2048 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2049 {
2050 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2051 IEEE80211_IFACE_ITER_NORMAL,
2052 ath10k_mac_handle_beacon_miss_iter,
2053 &vdev_id);
2054 }
2055
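/* Deferred worker scheduled from the beacon miss handler above; unless it was
 * cancelled by a received beacon in the meantime, report connection loss to
 * mac80211 for the still-associated vif.
 */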
2056 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2057 {
2058 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2059 connection_loss_work.work);
2060 struct ieee80211_vif *vif = arvif->vif;
2061
2062 if (!arvif->is_up)
2063 return;
2064
2065 ieee80211_connection_loss(vif);
2066 }
2067
2068 /**********************/
2069 /* Station management */
2070 /**********************/
2071
2072 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2073 struct ieee80211_vif *vif)
2074 {
2075 /* Some firmware revisions have unstable STA powersave when listen
2076 * interval is set too high (e.g. 5). The symptom is that the firmware
2077 * doesn't generate NullFunc frames properly even if buffered frames have
2078 * been indicated in the Beacon TIM. Firmware would seldom wake up to pull
2079 * buffered frames. Often pinging the device from AP would simply fail.
2080 *
2081 * As a workaround set it to 1.
2082 */
2083 if (vif->type == NL80211_IFTYPE_STATION)
2084 return 1;
2085
2086 return ar->hw->conf.listen_interval;
2087 }
2088
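/* Fill the capability-independent peer assoc fields: peer address, vdev id,
 * AID, auth flag, listen interval and the association capability field.
 */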
2089 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2090 struct ieee80211_vif *vif,
2091 struct ieee80211_sta *sta,
2092 struct wmi_peer_assoc_complete_arg *arg)
2093 {
2094 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2095 u32 aid;
2096
2097 lockdep_assert_held(&ar->conf_mutex);
2098
2099 if (vif->type == NL80211_IFTYPE_STATION)
2100 aid = vif->bss_conf.aid;
2101 else
2102 aid = sta->aid;
2103
2104 ether_addr_copy(arg->addr, sta->addr);
2105 arg->vdev_id = arvif->vdev_id;
2106 arg->peer_aid = aid;
2107 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2108 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2109 arg->peer_num_spatial_streams = 1;
2110 arg->peer_caps = vif->bss_conf.assoc_capability;
2111 }
2112
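/* Derive crypto-related peer flags from the BSS entry cached by cfg80211:
 * request a 4-way PTK handshake when an RSN or WPA IE is present, a 2-way
 * GTK handshake for WPA, and PMF when both the station and firmware
 * support it.
 */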
2113 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2114 struct ieee80211_vif *vif,
2115 struct ieee80211_sta *sta,
2116 struct wmi_peer_assoc_complete_arg *arg)
2117 {
2118 struct ieee80211_bss_conf *info = &vif->bss_conf;
2119 struct cfg80211_chan_def def;
2120 struct cfg80211_bss *bss;
2121 const u8 *rsnie = NULL;
2122 const u8 *wpaie = NULL;
2123
2124 lockdep_assert_held(&ar->conf_mutex);
2125
2126 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2127 return;
2128
2129 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2130 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2131 if (bss) {
2132 const struct cfg80211_bss_ies *ies;
2133
2134 rcu_read_lock();
2135 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2136
2137 ies = rcu_dereference(bss->ies);
2138
2139 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2140 WLAN_OUI_TYPE_MICROSOFT_WPA,
2141 ies->data,
2142 ies->len);
2143 rcu_read_unlock();
2144 cfg80211_put_bss(ar->hw->wiphy, bss);
2145 }
2146
2147 /* FIXME: is keying this off the RSN IE/WPA IE a correct approach? */
2148 if (rsnie || wpaie) {
2149 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2150 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2151 }
2152
2153 if (wpaie) {
2154 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2155 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2156 }
2157
2158 if (sta->mfp &&
2159 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2160 ar->running_fw->fw_file.fw_features)) {
2161 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2162 }
2163 }
2164
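/* Build the peer's legacy (CCK/OFDM) rate set from the rates it supports on
 * the current band, filtered by the user-configured legacy bitrate mask.
 */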
2165 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2166 struct ieee80211_vif *vif,
2167 struct ieee80211_sta *sta,
2168 struct wmi_peer_assoc_complete_arg *arg)
2169 {
2170 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2171 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2172 struct cfg80211_chan_def def;
2173 const struct ieee80211_supported_band *sband;
2174 const struct ieee80211_rate *rates;
2175 enum nl80211_band band;
2176 u32 ratemask;
2177 u8 rate;
2178 int i;
2179
2180 lockdep_assert_held(&ar->conf_mutex);
2181
2182 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2183 return;
2184
2185 band = def.chan->band;
2186 sband = ar->hw->wiphy->bands[band];
2187 ratemask = sta->supp_rates[band];
2188 ratemask &= arvif->bitrate_mask.control[band].legacy;
2189 rates = sband->bitrates;
2190
2191 rateset->num_rates = 0;
2192
2193 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2194 if (!(ratemask & 1))
2195 continue;
2196
2197 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2198 rateset->rates[rateset->num_rates] = rate;
2199 rateset->num_rates++;
2200 }
2201 }
2202
2203 static bool
2204 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2205 {
2206 int nss;
2207
2208 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2209 if (ht_mcs_mask[nss])
2210 return false;
2211
2212 return true;
2213 }
2214
2215 static bool
2216 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2217 {
2218 int nss;
2219
2220 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2221 if (vht_mcs_mask[nss])
2222 return false;
2223
2224 return true;
2225 }
2226
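/* Translate the station's HT capabilities (A-MPDU parameters, LDPC, SGI,
 * STBC, 40 MHz support and the RX MCS map) into WMI peer assoc flags and
 * rates, honouring the per-band HT MCS mask and working around peers that
 * advertise no HT RX MCS map at all.
 */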
2227 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2228 struct ieee80211_vif *vif,
2229 struct ieee80211_sta *sta,
2230 struct wmi_peer_assoc_complete_arg *arg)
2231 {
2232 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2233 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2234 struct cfg80211_chan_def def;
2235 enum nl80211_band band;
2236 const u8 *ht_mcs_mask;
2237 const u16 *vht_mcs_mask;
2238 int i, n;
2239 u8 max_nss;
2240 u32 stbc;
2241
2242 lockdep_assert_held(&ar->conf_mutex);
2243
2244 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2245 return;
2246
2247 if (!ht_cap->ht_supported)
2248 return;
2249
2250 band = def.chan->band;
2251 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2252 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2253
2254 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2255 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2256 return;
2257
2258 arg->peer_flags |= ar->wmi.peer_flags->ht;
2259 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2260 ht_cap->ampdu_factor)) - 1;
2261
2262 arg->peer_mpdu_density =
2263 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2264
2265 arg->peer_ht_caps = ht_cap->cap;
2266 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2267
2268 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2269 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2270
2271 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2272 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2273 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2274 }
2275
2276 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2277 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2278 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2279
2280 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2281 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2282 }
2283
2284 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2285 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2286 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2287 }
2288
2289 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2290 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2291 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2292 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2293 arg->peer_rate_caps |= stbc;
2294 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2295 }
2296
2297 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2298 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2299 else if (ht_cap->mcs.rx_mask[1])
2300 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2301
2302 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2303 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2304 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2305 max_nss = (i / 8) + 1;
2306 arg->peer_ht_rates.rates[n++] = i;
2307 }
2308
2309 /*
2310 * This is a workaround for HT-enabled STAs which break the spec
2311 * and have no HT capabilities RX mask (no HT RX MCS map).
2312 *
2313 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2314 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2315 *
2316 * Firmware asserts if such situation occurs.
2317 */
2318 if (n == 0) {
2319 arg->peer_ht_rates.num_rates = 8;
2320 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2321 arg->peer_ht_rates.rates[i] = i;
2322 } else {
2323 arg->peer_ht_rates.num_rates = n;
2324 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2325 }
2326
2327 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2328 arg->addr,
2329 arg->peer_ht_rates.num_rates,
2330 arg->peer_num_spatial_streams);
2331 }
2332
2333 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2334 struct ath10k_vif *arvif,
2335 struct ieee80211_sta *sta)
2336 {
2337 u32 uapsd = 0;
2338 u32 max_sp = 0;
2339 int ret = 0;
2340
2341 lockdep_assert_held(&ar->conf_mutex);
2342
2343 if (sta->wme && sta->uapsd_queues) {
2344 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2345 sta->uapsd_queues, sta->max_sp);
2346
2347 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2348 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2349 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2350 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2351 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2352 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2353 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2354 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2355 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2356 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2357 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2358 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2359
2360 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2361 max_sp = sta->max_sp;
2362
2363 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2364 sta->addr,
2365 WMI_AP_PS_PEER_PARAM_UAPSD,
2366 uapsd);
2367 if (ret) {
2368 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2369 arvif->vdev_id, ret);
2370 return ret;
2371 }
2372
2373 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2374 sta->addr,
2375 WMI_AP_PS_PEER_PARAM_MAX_SP,
2376 max_sp);
2377 if (ret) {
2378 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2379 arvif->vdev_id, ret);
2380 return ret;
2381 }
2382
2383 /* TODO setup this based on STA listen interval and
2384 * beacon interval. Currently we don't know
2385 * sta->listen_interval - mac80211 patch required.
2386 * Use 10 seconds for now.
2387 */
2388 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2389 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2390 10);
2391 if (ret) {
2392 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2393 arvif->vdev_id, ret);
2394 return ret;
2395 }
2396 }
2397
2398 return 0;
2399 }
2400
2401 static u16
2402 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2403 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2404 {
2405 int idx_limit;
2406 int nss;
2407 u16 mcs_map;
2408 u16 mcs;
2409
2410 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2411 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2412 vht_mcs_limit[nss];
2413
2414 if (mcs_map)
2415 idx_limit = fls(mcs_map) - 1;
2416 else
2417 idx_limit = -1;
2418
2419 switch (idx_limit) {
2420 case 0: /* fall through */
2421 case 1: /* fall through */
2422 case 2: /* fall through */
2423 case 3: /* fall through */
2424 case 4: /* fall through */
2425 case 5: /* fall through */
2426 case 6: /* fall through */
2427 default:
2428 /* see ath10k_mac_can_set_bitrate_mask() */
2429 WARN_ON(1);
2430 /* fall through */
2431 case -1:
2432 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2433 break;
2434 case 7:
2435 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2436 break;
2437 case 8:
2438 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2439 break;
2440 case 9:
2441 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2442 break;
2443 }
2444
2445 tx_mcs_set &= ~(0x3 << (nss * 2));
2446 tx_mcs_set |= mcs << (nss * 2);
2447 }
2448
2449 return tx_mcs_set;
2450 }
2451
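/* Translate the station's VHT capabilities into WMI peer assoc fields: peer
 * flags (including 80/160 MHz bandwidth), A-MPDU limit, spatial streams and
 * the RX/TX MCS maps, with the TX map clamped by the configured VHT MCS mask.
 */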
2452 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2453 struct ieee80211_vif *vif,
2454 struct ieee80211_sta *sta,
2455 struct wmi_peer_assoc_complete_arg *arg)
2456 {
2457 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2458 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2459 struct cfg80211_chan_def def;
2460 enum nl80211_band band;
2461 const u16 *vht_mcs_mask;
2462 u8 ampdu_factor;
2463 u8 max_nss, vht_mcs;
2464 int i;
2465
2466 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2467 return;
2468
2469 if (!vht_cap->vht_supported)
2470 return;
2471
2472 band = def.chan->band;
2473 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2474
2475 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2476 return;
2477
2478 arg->peer_flags |= ar->wmi.peer_flags->vht;
2479
2480 if (def.chan->band == NL80211_BAND_2GHZ)
2481 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2482
2483 arg->peer_vht_caps = vht_cap->cap;
2484
2485 ampdu_factor = (vht_cap->cap &
2486 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2487 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2488
2489 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2490 * zero in VHT IE. Using it would result in degraded throughput.
2491 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2492 * it if VHT max_mpdu is smaller.
2493 */
2494 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2495 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2496 ampdu_factor)) - 1);
2497
2498 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2499 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2500
2501 if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
2502 arg->peer_flags |= ar->wmi.peer_flags->bw160;
2503
2504 /* Calculate peer NSS capability from VHT capabilities if STA
2505 * supports VHT.
2506 */
2507 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
2508 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
2509 (2 * i) & 3;
2510
2511 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
2512 vht_mcs_mask[i])
2513 max_nss = i + 1;
2514 }
2515 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2516 arg->peer_vht_rates.rx_max_rate =
2517 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2518 arg->peer_vht_rates.rx_mcs_set =
2519 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2520 arg->peer_vht_rates.tx_max_rate =
2521 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2522 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2523 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2524
2525 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2526 sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2527
2528 if (arg->peer_vht_rates.rx_max_rate &&
2529 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
2530 switch (arg->peer_vht_rates.rx_max_rate) {
2531 case 1560:
2532 /* 2x2 at 160 MHz is all it can do. */
2533 arg->peer_bw_rxnss_override = 2;
2534 break;
2535 case 780:
2536 /* Can only do 1x1 at 160 MHz (Long Guard Interval) */
2537 arg->peer_bw_rxnss_override = 1;
2538 break;
2539 }
2540 }
2541 }
2542
2543 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2544 struct ieee80211_vif *vif,
2545 struct ieee80211_sta *sta,
2546 struct wmi_peer_assoc_complete_arg *arg)
2547 {
2548 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2549
2550 switch (arvif->vdev_type) {
2551 case WMI_VDEV_TYPE_AP:
2552 if (sta->wme)
2553 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2554
2555 if (sta->wme && sta->uapsd_queues) {
2556 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2557 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2558 }
2559 break;
2560 case WMI_VDEV_TYPE_STA:
2561 if (sta->wme)
2562 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2563 break;
2564 case WMI_VDEV_TYPE_IBSS:
2565 if (sta->wme)
2566 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2567 break;
2568 default:
2569 break;
2570 }
2571
2572 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2573 sta->addr, !!(arg->peer_flags &
2574 arvif->ar->wmi.peer_flags->qos));
2575 }
2576
2577 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2578 {
2579 return sta->supp_rates[NL80211_BAND_2GHZ] >>
2580 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2581 }
2582
2583 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
2584 struct ieee80211_sta *sta)
2585 {
2586 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
2587 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
2588 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
2589 return MODE_11AC_VHT160;
2590 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
2591 return MODE_11AC_VHT80_80;
2592 default:
2593 /* not sure if this is a valid case? */
2594 return MODE_11AC_VHT160;
2595 }
2596 }
2597
2598 if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2599 return MODE_11AC_VHT80;
2600
2601 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2602 return MODE_11AC_VHT40;
2603
2604 if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2605 return MODE_11AC_VHT20;
2606
2607 return MODE_UNKNOWN;
2608 }
2609
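/* Pick the WMI PHY mode for the peer based on band, HT/VHT support and the
 * station bandwidth, skipping modes whose MCS mask is fully cleared.
 */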
2610 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2611 struct ieee80211_vif *vif,
2612 struct ieee80211_sta *sta,
2613 struct wmi_peer_assoc_complete_arg *arg)
2614 {
2615 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2616 struct cfg80211_chan_def def;
2617 enum nl80211_band band;
2618 const u8 *ht_mcs_mask;
2619 const u16 *vht_mcs_mask;
2620 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2621
2622 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2623 return;
2624
2625 band = def.chan->band;
2626 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2627 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2628
2629 switch (band) {
2630 case NL80211_BAND_2GHZ:
2631 if (sta->vht_cap.vht_supported &&
2632 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2633 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2634 phymode = MODE_11AC_VHT40;
2635 else
2636 phymode = MODE_11AC_VHT20;
2637 } else if (sta->ht_cap.ht_supported &&
2638 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2639 if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2640 phymode = MODE_11NG_HT40;
2641 else
2642 phymode = MODE_11NG_HT20;
2643 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2644 phymode = MODE_11G;
2645 } else {
2646 phymode = MODE_11B;
2647 }
2648
2649 break;
2650 case NL80211_BAND_5GHZ:
2651 /*
2652 * Check VHT first.
2653 */
2654 if (sta->vht_cap.vht_supported &&
2655 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2656 phymode = ath10k_mac_get_phymode_vht(ar, sta);
2657 } else if (sta->ht_cap.ht_supported &&
2658 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2659 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2660 phymode = MODE_11NA_HT40;
2661 else
2662 phymode = MODE_11NA_HT20;
2663 } else {
2664 phymode = MODE_11A;
2665 }
2666
2667 break;
2668 default:
2669 break;
2670 }
2671
2672 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2673 sta->addr, ath10k_wmi_phymode_str(phymode));
2674
2675 arg->peer_phymode = phymode;
2676 WARN_ON(phymode == MODE_UNKNOWN);
2677 }
2678
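/* Assemble a complete WMI peer assoc argument by running the helpers above
 * over a zeroed struct; ath10k_bss_assoc() and ath10k_station_assoc() below
 * pass the result straight to ath10k_wmi_peer_assoc().
 */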
2679 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2680 struct ieee80211_vif *vif,
2681 struct ieee80211_sta *sta,
2682 struct wmi_peer_assoc_complete_arg *arg)
2683 {
2684 lockdep_assert_held(&ar->conf_mutex);
2685
2686 memset(arg, 0, sizeof(*arg));
2687
2688 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2689 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2690 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2691 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2692 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2693 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2694 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2695
2696 return 0;
2697 }
2698
2699 static const u32 ath10k_smps_map[] = {
2700 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2701 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2702 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2703 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2704 };
2705
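/* Map the station's HT SM power save field onto a WMI SMPS state (see
 * ath10k_smps_map above) and program it for the peer.
 */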
2706 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2707 const u8 *addr,
2708 const struct ieee80211_sta_ht_cap *ht_cap)
2709 {
2710 int smps;
2711
2712 if (!ht_cap->ht_supported)
2713 return 0;
2714
2715 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2716 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2717
2718 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2719 return -EINVAL;
2720
2721 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2722 WMI_PEER_SMPS_STATE,
2723 ath10k_smps_map[smps]);
2724 }
2725
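/* Recompute the vdev TXBF parameter after (dis)association on firmware that
 * uses the "after assoc" TXBF scheme: enable SU/MU beamformee operation when
 * the remote side can act as a beamformer, and vice versa.
 */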
2726 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2727 struct ieee80211_vif *vif,
2728 struct ieee80211_sta_vht_cap vht_cap)
2729 {
2730 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2731 int ret;
2732 u32 param;
2733 u32 value;
2734
2735 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2736 return 0;
2737
2738 if (!(ar->vht_cap_info &
2739 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2740 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2741 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2742 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2743 return 0;
2744
2745 param = ar->wmi.vdev_param->txbf;
2746 value = 0;
2747
2748 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2749 return 0;
2750
2751 /* The following logic is correct. If a remote STA advertises support
2752 * for being a beamformer then we should enable ourselves as a beamformee.
2753 */
2754
2755 if (ar->vht_cap_info &
2756 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2757 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2758 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2759 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2760
2761 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2762 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2763 }
2764
2765 if (ar->vht_cap_info &
2766 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2767 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2768 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2769 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2770
2771 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2772 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2773 }
2774
2775 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2776 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2777
2778 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2779 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2780
2781 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2782 if (ret) {
2783 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2784 value, ret);
2785 return ret;
2786 }
2787
2788 return 0;
2789 }
2790
2791 /* can be called only in mac80211 callbacks due to `key_count` usage */
2792 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2793 struct ieee80211_vif *vif,
2794 struct ieee80211_bss_conf *bss_conf)
2795 {
2796 struct ath10k *ar = hw->priv;
2797 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2798 struct ieee80211_sta_ht_cap ht_cap;
2799 struct ieee80211_sta_vht_cap vht_cap;
2800 struct wmi_peer_assoc_complete_arg peer_arg;
2801 struct ieee80211_sta *ap_sta;
2802 int ret;
2803
2804 lockdep_assert_held(&ar->conf_mutex);
2805
2806 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2807 arvif->vdev_id, arvif->bssid, arvif->aid);
2808
2809 rcu_read_lock();
2810
2811 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2812 if (!ap_sta) {
2813 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2814 bss_conf->bssid, arvif->vdev_id);
2815 rcu_read_unlock();
2816 return;
2817 }
2818
2819 /* ap_sta must be accessed only within the RCU section, which must be
2820 * left before calling ath10k_setup_peer_smps() since it might sleep.
2821 */
2822 ht_cap = ap_sta->ht_cap;
2823 vht_cap = ap_sta->vht_cap;
2824
2825 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2826 if (ret) {
2827 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2828 bss_conf->bssid, arvif->vdev_id, ret);
2829 rcu_read_unlock();
2830 return;
2831 }
2832
2833 rcu_read_unlock();
2834
2835 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2836 if (ret) {
2837 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2838 bss_conf->bssid, arvif->vdev_id, ret);
2839 return;
2840 }
2841
2842 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2843 if (ret) {
2844 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2845 arvif->vdev_id, ret);
2846 return;
2847 }
2848
2849 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2850 if (ret) {
2851 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2852 arvif->vdev_id, bss_conf->bssid, ret);
2853 return;
2854 }
2855
2856 ath10k_dbg(ar, ATH10K_DBG_MAC,
2857 "mac vdev %d up (associated) bssid %pM aid %d\n",
2858 arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2859
2860 WARN_ON(arvif->is_up);
2861
2862 arvif->aid = bss_conf->aid;
2863 ether_addr_copy(arvif->bssid, bss_conf->bssid);
2864
2865 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2866 if (ret) {
2867 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2868 arvif->vdev_id, ret);
2869 return;
2870 }
2871
2872 arvif->is_up = true;
2873
2874 /* Workaround: Some firmware revisions (tested with qca6174
2875 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
2876 * poked with peer param command.
2877 */
2878 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2879 WMI_PEER_DUMMY_VAR, 1);
2880 if (ret) {
2881 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2882 arvif->bssid, arvif->vdev_id, ret);
2883 return;
2884 }
2885 }
2886
2887 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2888 struct ieee80211_vif *vif)
2889 {
2890 struct ath10k *ar = hw->priv;
2891 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2892 struct ieee80211_sta_vht_cap vht_cap = {};
2893 int ret;
2894
2895 lockdep_assert_held(&ar->conf_mutex);
2896
2897 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2898 arvif->vdev_id, arvif->bssid);
2899
2900 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2901 if (ret)
2902 ath10k_warn(ar, "failed to down vdev %i: %d\n",
2903 arvif->vdev_id, ret);
2904
2905 arvif->def_wep_key_idx = -1;
2906
2907 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2908 if (ret) {
2909 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2910 arvif->vdev_id, ret);
2911 return;
2912 }
2913
2914 arvif->is_up = false;
2915
2916 cancel_delayed_work_sync(&arvif->connection_loss_work);
2917 }
2918
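/* (Re)associate a station peer: issue the WMI peer assoc command and, on
 * initial association only, also program SMPS, AP U-APSD parameters, RTS/CTS
 * protection for legacy clients and any cached static WEP keys.
 */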
2919 static int ath10k_station_assoc(struct ath10k *ar,
2920 struct ieee80211_vif *vif,
2921 struct ieee80211_sta *sta,
2922 bool reassoc)
2923 {
2924 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2925 struct wmi_peer_assoc_complete_arg peer_arg;
2926 int ret = 0;
2927
2928 lockdep_assert_held(&ar->conf_mutex);
2929
2930 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2931 if (ret) {
2932 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2933 sta->addr, arvif->vdev_id, ret);
2934 return ret;
2935 }
2936
2937 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2938 if (ret) {
2939 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2940 sta->addr, arvif->vdev_id, ret);
2941 return ret;
2942 }
2943
2944 /* Re-assoc is run only to update supported rates for a given station. It
2945 * doesn't make much sense to reconfigure the peer completely.
2946 */
2947 if (!reassoc) {
2948 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2949 &sta->ht_cap);
2950 if (ret) {
2951 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2952 arvif->vdev_id, ret);
2953 return ret;
2954 }
2955
2956 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2957 if (ret) {
2958 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2959 sta->addr, arvif->vdev_id, ret);
2960 return ret;
2961 }
2962
2963 if (!sta->wme) {
2964 arvif->num_legacy_stations++;
2965 ret = ath10k_recalc_rtscts_prot(arvif);
2966 if (ret) {
2967 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2968 arvif->vdev_id, ret);
2969 return ret;
2970 }
2971 }
2972
2973 /* Plumb cached keys only for static WEP */
2974 if (arvif->def_wep_key_idx != -1) {
2975 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2976 if (ret) {
2977 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2978 arvif->vdev_id, ret);
2979 return ret;
2980 }
2981 }
2982 }
2983
2984 return ret;
2985 }
2986
2987 static int ath10k_station_disassoc(struct ath10k *ar,
2988 struct ieee80211_vif *vif,
2989 struct ieee80211_sta *sta)
2990 {
2991 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2992 int ret = 0;
2993
2994 lockdep_assert_held(&ar->conf_mutex);
2995
2996 if (!sta->wme) {
2997 arvif->num_legacy_stations--;
2998 ret = ath10k_recalc_rtscts_prot(arvif);
2999 if (ret) {
3000 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3001 arvif->vdev_id, ret);
3002 return ret;
3003 }
3004 }
3005
3006 ret = ath10k_clear_peer_keys(arvif, sta->addr);
3007 if (ret) {
3008 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
3009 arvif->vdev_id, ret);
3010 return ret;
3011 }
3012
3013 return ret;
3014 }
3015
3016 /**************/
3017 /* Regulatory */
3018 /**************/
3019
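/* Push the list of enabled channels from cfg80211 to the firmware via
 * ath10k_wmi_scan_chan_list(), translating channel flags (passive, radar,
 * HT40+, power limits) into wmi_channel_arg fields.
 */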
3020 static int ath10k_update_channel_list(struct ath10k *ar)
3021 {
3022 struct ieee80211_hw *hw = ar->hw;
3023 struct ieee80211_supported_band **bands;
3024 enum nl80211_band band;
3025 struct ieee80211_channel *channel;
3026 struct wmi_scan_chan_list_arg arg = {0};
3027 struct wmi_channel_arg *ch;
3028 bool passive;
3029 int len;
3030 int ret;
3031 int i;
3032
3033 lockdep_assert_held(&ar->conf_mutex);
3034
3035 bands = hw->wiphy->bands;
3036 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3037 if (!bands[band])
3038 continue;
3039
3040 for (i = 0; i < bands[band]->n_channels; i++) {
3041 if (bands[band]->channels[i].flags &
3042 IEEE80211_CHAN_DISABLED)
3043 continue;
3044
3045 arg.n_channels++;
3046 }
3047 }
3048
3049 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
3050 arg.channels = kzalloc(len, GFP_KERNEL);
3051 if (!arg.channels)
3052 return -ENOMEM;
3053
3054 ch = arg.channels;
3055 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3056 if (!bands[band])
3057 continue;
3058
3059 for (i = 0; i < bands[band]->n_channels; i++) {
3060 channel = &bands[band]->channels[i];
3061
3062 if (channel->flags & IEEE80211_CHAN_DISABLED)
3063 continue;
3064
3065 ch->allow_ht = true;
3066
3067 /* FIXME: when should we really allow VHT? */
3068 ch->allow_vht = true;
3069
3070 ch->allow_ibss =
3071 !(channel->flags & IEEE80211_CHAN_NO_IR);
3072
3073 ch->ht40plus =
3074 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
3075
3076 ch->chan_radar =
3077 !!(channel->flags & IEEE80211_CHAN_RADAR);
3078
3079 passive = channel->flags & IEEE80211_CHAN_NO_IR;
3080 ch->passive = passive;
3081
3082 /* the firmware is ignoring the "radar" flag of the
3083 * channel and is scanning actively using Probe Requests
3084 * on "Radar detection"/DFS channels which are not
3085 * marked as "available"
3086 */
3087 ch->passive |= ch->chan_radar;
3088
3089 ch->freq = channel->center_freq;
3090 ch->band_center_freq1 = channel->center_freq;
3091 ch->min_power = 0;
3092 ch->max_power = channel->max_power * 2;
3093 ch->max_reg_power = channel->max_reg_power * 2;
3094 ch->max_antenna_gain = channel->max_antenna_gain * 2;
3095 ch->reg_class_id = 0; /* FIXME */
3096
3097 /* FIXME: why use only legacy modes, why not any
3098 * HT/VHT modes? Would that even make any
3099 * difference?
3100 */
3101 if (channel->band == NL80211_BAND_2GHZ)
3102 ch->mode = MODE_11G;
3103 else
3104 ch->mode = MODE_11A;
3105
3106 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
3107 continue;
3108
3109 ath10k_dbg(ar, ATH10K_DBG_WMI,
3110 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
3111 ch - arg.channels, arg.n_channels,
3112 ch->freq, ch->max_power, ch->max_reg_power,
3113 ch->max_antenna_gain, ch->mode);
3114
3115 ch++;
3116 }
3117 }
3118
3119 ret = ath10k_wmi_scan_chan_list(ar, &arg);
3120 kfree(arg.channels);
3121
3122 return ret;
3123 }
3124
3125 static enum wmi_dfs_region
3126 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3127 {
3128 switch (dfs_region) {
3129 case NL80211_DFS_UNSET:
3130 return WMI_UNINIT_DFS_DOMAIN;
3131 case NL80211_DFS_FCC:
3132 return WMI_FCC_DFS_DOMAIN;
3133 case NL80211_DFS_ETSI:
3134 return WMI_ETSI_DFS_DOMAIN;
3135 case NL80211_DFS_JP:
3136 return WMI_MKK4_DFS_DOMAIN;
3137 }
3138 return WMI_UNINIT_DFS_DOMAIN;
3139 }
3140
3141 static void ath10k_regd_update(struct ath10k *ar)
3142 {
3143 struct reg_dmn_pair_mapping *regpair;
3144 int ret;
3145 enum wmi_dfs_region wmi_dfs_reg;
3146 enum nl80211_dfs_regions nl_dfs_reg;
3147
3148 lockdep_assert_held(&ar->conf_mutex);
3149
3150 ret = ath10k_update_channel_list(ar);
3151 if (ret)
3152 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3153
3154 regpair = ar->ath_common.regulatory.regpair;
3155
3156 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3157 nl_dfs_reg = ar->dfs_detector->region;
3158 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3159 } else {
3160 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3161 }
3162
3163 /* Target allows setting up per-band regdomain but ath_common provides
3164 * only a combined one
3165 */
3166 ret = ath10k_wmi_pdev_set_regdomain(ar,
3167 regpair->reg_domain,
3168 regpair->reg_domain, /* 2ghz */
3169 regpair->reg_domain, /* 5ghz */
3170 regpair->reg_2ghz_ctl,
3171 regpair->reg_5ghz_ctl,
3172 wmi_dfs_reg);
3173 if (ret)
3174 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3175 }
3176
3177 static void ath10k_mac_update_channel_list(struct ath10k *ar,
3178 struct ieee80211_supported_band *band)
3179 {
3180 int i;
3181
3182 if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
3183 for (i = 0; i < band->n_channels; i++) {
3184 if (band->channels[i].center_freq < ar->low_5ghz_chan ||
3185 band->channels[i].center_freq > ar->high_5ghz_chan)
3186 band->channels[i].flags |=
3187 IEEE80211_CHAN_DISABLED;
3188 }
3189 }
3190 }
3191
3192 static void ath10k_reg_notifier(struct wiphy *wiphy,
3193 struct regulatory_request *request)
3194 {
3195 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3196 struct ath10k *ar = hw->priv;
3197 bool result;
3198
3199 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3200
3201 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3202 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3203 request->dfs_region);
3204 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3205 request->dfs_region);
3206 if (!result)
3207 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3208 request->dfs_region);
3209 }
3210
3211 mutex_lock(&ar->conf_mutex);
3212 if (ar->state == ATH10K_STATE_ON)
3213 ath10k_regd_update(ar);
3214 mutex_unlock(&ar->conf_mutex);
3215
3216 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
3217 ath10k_mac_update_channel_list(ar,
3218 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
3219 }
3220
3221 /***************/
3222 /* TX handlers */
3223 /***************/
3224
3225 enum ath10k_mac_tx_path {
3226 ATH10K_MAC_TX_HTT,
3227 ATH10K_MAC_TX_HTT_MGMT,
3228 ATH10K_MAC_TX_WMI_MGMT,
3229 ATH10K_MAC_TX_UNKNOWN,
3230 };
3231
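/* TX flow control: ar->tx_paused and arvif->tx_paused are bitmasks of pause
 * reasons. Device-wide locks stop all mac80211 queues, per-vif locks stop a
 * single vdev queue, and queues are woken again only once every pause reason
 * has been cleared.
 */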
3232 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3233 {
3234 lockdep_assert_held(&ar->htt.tx_lock);
3235
3236 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3237 ar->tx_paused |= BIT(reason);
3238 ieee80211_stop_queues(ar->hw);
3239 }
3240
3241 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3242 struct ieee80211_vif *vif)
3243 {
3244 struct ath10k *ar = data;
3245 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3246
3247 if (arvif->tx_paused)
3248 return;
3249
3250 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3251 }
3252
3253 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3254 {
3255 lockdep_assert_held(&ar->htt.tx_lock);
3256
3257 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3258 ar->tx_paused &= ~BIT(reason);
3259
3260 if (ar->tx_paused)
3261 return;
3262
3263 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3264 IEEE80211_IFACE_ITER_RESUME_ALL,
3265 ath10k_mac_tx_unlock_iter,
3266 ar);
3267
3268 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3269 }
3270
3271 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3272 {
3273 struct ath10k *ar = arvif->ar;
3274
3275 lockdep_assert_held(&ar->htt.tx_lock);
3276
3277 WARN_ON(reason >= BITS_PER_LONG);
3278 arvif->tx_paused |= BIT(reason);
3279 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3280 }
3281
3282 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3283 {
3284 struct ath10k *ar = arvif->ar;
3285
3286 lockdep_assert_held(&ar->htt.tx_lock);
3287
3288 WARN_ON(reason >= BITS_PER_LONG);
3289 arvif->tx_paused &= ~BIT(reason);
3290
3291 if (ar->tx_paused)
3292 return;
3293
3294 if (arvif->tx_paused)
3295 return;
3296
3297 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3298 }
3299
3300 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3301 enum wmi_tlv_tx_pause_id pause_id,
3302 enum wmi_tlv_tx_pause_action action)
3303 {
3304 struct ath10k *ar = arvif->ar;
3305
3306 lockdep_assert_held(&ar->htt.tx_lock);
3307
3308 switch (action) {
3309 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3310 ath10k_mac_vif_tx_lock(arvif, pause_id);
3311 break;
3312 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3313 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3314 break;
3315 default:
3316 ath10k_dbg(ar, ATH10K_DBG_BOOT,
3317 "received unknown tx pause action %d on vdev %i, ignoring\n",
3318 action, arvif->vdev_id);
3319 break;
3320 }
3321 }
3322
3323 struct ath10k_mac_tx_pause {
3324 u32 vdev_id;
3325 enum wmi_tlv_tx_pause_id pause_id;
3326 enum wmi_tlv_tx_pause_action action;
3327 };
3328
3329 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3330 struct ieee80211_vif *vif)
3331 {
3332 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3333 struct ath10k_mac_tx_pause *arg = data;
3334
3335 if (arvif->vdev_id != arg->vdev_id)
3336 return;
3337
3338 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3339 }
3340
3341 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3342 enum wmi_tlv_tx_pause_id pause_id,
3343 enum wmi_tlv_tx_pause_action action)
3344 {
3345 struct ath10k_mac_tx_pause arg = {
3346 .vdev_id = vdev_id,
3347 .pause_id = pause_id,
3348 .action = action,
3349 };
3350
3351 spin_lock_bh(&ar->htt.tx_lock);
3352 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3353 IEEE80211_IFACE_ITER_RESUME_ALL,
3354 ath10k_mac_handle_tx_pause_iter,
3355 &arg);
3356 spin_unlock_bh(&ar->htt.tx_lock);
3357 }
3358
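/* Select the HTT encapsulation mode for an outgoing frame: raw for monitor
 * interfaces and raw-mode devices, mgmt for management frames (and NullFunc
 * frames on pre-3.0 HTT firmware), Ethernet for TDLS data on affected
 * wmi-tlv firmware, and Native Wifi otherwise.
 */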
3359 static enum ath10k_hw_txrx_mode
3360 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3361 struct ieee80211_vif *vif,
3362 struct ieee80211_sta *sta,
3363 struct sk_buff *skb)
3364 {
3365 const struct ieee80211_hdr *hdr = (void *)skb->data;
3366 __le16 fc = hdr->frame_control;
3367
3368 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3369 return ATH10K_HW_TXRX_RAW;
3370
3371 if (ieee80211_is_mgmt(fc))
3372 return ATH10K_HW_TXRX_MGMT;
3373
3374 /* Workaround:
3375 *
3376 * NullFunc frames are mostly used to check whether a client or AP is
3377 * still reachable and responsive. This implies tx status reports must be
3378 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3379 * come to a conclusion that the other end disappeared and tear down
3380 * BSS connection or it can never disconnect from BSS/client (which is
3381 * the case).
3382 *
3383 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3384 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
3385 * which seems to deliver correct tx reports for NullFunc frames. The
3386 * downside of using it is it ignores client powersave state so it can
3387 * end up disconnecting sleeping clients in AP mode. It should fix STA
3388 * mode though because APs don't sleep.
3389 */
3390 if (ar->htt.target_version_major < 3 &&
3391 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3392 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3393 ar->running_fw->fw_file.fw_features))
3394 return ATH10K_HW_TXRX_MGMT;
3395
3396 /* Workaround:
3397 *
3398 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3399 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3400 * to work with Ethernet txmode so use it.
3401 *
3402 * FIXME: Check if raw mode works with TDLS.
3403 */
3404 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3405 return ATH10K_HW_TXRX_ETHERNET;
3406
3407 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3408 return ATH10K_HW_TXRX_RAW;
3409
3410 return ATH10K_HW_TXRX_NATIVE_WIFI;
3411 }
3412
3413 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3414 struct sk_buff *skb)
3415 {
3416 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3417 const struct ieee80211_hdr *hdr = (void *)skb->data;
3418 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3419 IEEE80211_TX_CTL_INJECTED;
3420
3421 if (!ieee80211_has_protected(hdr->frame_control))
3422 return false;
3423
3424 if ((info->flags & mask) == mask)
3425 return false;
3426
3427 if (vif)
3428 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
3429
3430 return true;
3431 }
3432
3433 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3434 * Control in the header.
3435 */
3436 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3437 {
3438 struct ieee80211_hdr *hdr = (void *)skb->data;
3439 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3440 u8 *qos_ctl;
3441
3442 if (!ieee80211_is_data_qos(hdr->frame_control))
3443 return;
3444
3445 qos_ctl = ieee80211_get_qos_ctl(hdr);
3446 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3447 skb->data, (void *)qos_ctl - (void *)skb->data);
3448 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3449
3450 /* Some firmware revisions don't handle sending QoS NullFunc well.
3451 * These frames are mainly used for CQM purposes so it doesn't really
3452 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3453 */
3454 hdr = (void *)skb->data;
3455 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3456 cb->flags &= ~ATH10K_SKB_F_QOS;
3457
3458 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3459 }
3460
3461 static void ath10k_tx_h_8023(struct sk_buff *skb)
3462 {
3463 struct ieee80211_hdr *hdr;
3464 struct rfc1042_hdr *rfc1042;
3465 struct ethhdr *eth;
3466 size_t hdrlen;
3467 u8 da[ETH_ALEN];
3468 u8 sa[ETH_ALEN];
3469 __be16 type;
3470
3471 hdr = (void *)skb->data;
3472 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3473 rfc1042 = (void *)skb->data + hdrlen;
3474
3475 ether_addr_copy(da, ieee80211_get_DA(hdr));
3476 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3477 type = rfc1042->snap_type;
3478
3479 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3480 skb_push(skb, sizeof(*eth));
3481
3482 eth = (void *)skb->data;
3483 ether_addr_copy(eth->h_dest, da);
3484 ether_addr_copy(eth->h_source, sa);
3485 eth->h_proto = type;
3486 }
3487
3488 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3489 struct ieee80211_vif *vif,
3490 struct sk_buff *skb)
3491 {
3492 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3493 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3494
3495 /* This case applies only to P2P_GO */
3496 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3497 return;
3498
3499 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3500 spin_lock_bh(&ar->data_lock);
3501 if (arvif->u.ap.noa_data)
3502 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3503 GFP_ATOMIC))
3504 skb_put_data(skb, arvif->u.ap.noa_data,
3505 arvif->u.ap.noa_len);
3506 spin_unlock_bh(&ar->data_lock);
3507 }
3508 }
3509
3510 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3511 struct ieee80211_vif *vif,
3512 struct ieee80211_txq *txq,
3513 struct sk_buff *skb)
3514 {
3515 struct ieee80211_hdr *hdr = (void *)skb->data;
3516 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3517
3518 cb->flags = 0;
3519 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3520 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3521
3522 if (ieee80211_is_mgmt(hdr->frame_control))
3523 cb->flags |= ATH10K_SKB_F_MGMT;
3524
3525 if (ieee80211_is_data_qos(hdr->frame_control))
3526 cb->flags |= ATH10K_SKB_F_QOS;
3527
3528 cb->vif = vif;
3529 cb->txq = txq;
3530 }
3531
3532 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3533 {
3534 /* FIXME: Not really sure since when the behaviour changed. At some
3535 * point new firmware stopped requiring creation of peer entries for
3536 * offchannel tx (and actually creating them causes issues with wmi-htc
3537 * tx credit replenishment and reliability). Assuming it's at least 3.4
3538 * because that's when the `freq` field was added to the TX_FRM HTT command.
3539 */
3540 return (ar->htt.target_version_major >= 3 &&
3541 ar->htt.target_version_minor >= 4 &&
3542 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3543 }
3544
3545 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3546 {
3547 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3548 int ret = 0;
3549
3550 spin_lock_bh(&ar->data_lock);
3551
3552 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3553 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3554 ret = -ENOSPC;
3555 goto unlock;
3556 }
3557
3558 __skb_queue_tail(q, skb);
3559 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3560
3561 unlock:
3562 spin_unlock_bh(&ar->data_lock);
3563
3564 return ret;
3565 }
3566
3567 static enum ath10k_mac_tx_path
3568 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3569 struct sk_buff *skb,
3570 enum ath10k_hw_txrx_mode txmode)
3571 {
3572 switch (txmode) {
3573 case ATH10K_HW_TXRX_RAW:
3574 case ATH10K_HW_TXRX_NATIVE_WIFI:
3575 case ATH10K_HW_TXRX_ETHERNET:
3576 return ATH10K_MAC_TX_HTT;
3577 case ATH10K_HW_TXRX_MGMT:
3578 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3579 ar->running_fw->fw_file.fw_features))
3580 return ATH10K_MAC_TX_WMI_MGMT;
3581 else if (ar->htt.target_version_major >= 3)
3582 return ATH10K_MAC_TX_HTT;
3583 else
3584 return ATH10K_MAC_TX_HTT_MGMT;
3585 }
3586
3587 return ATH10K_MAC_TX_UNKNOWN;
3588 }
3589
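/* Hand the frame to the selected TX path (HTT data, HTT management or the
 * WMI management queue); on failure the skb is freed here.
 */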
3590 static int ath10k_mac_tx_submit(struct ath10k *ar,
3591 enum ath10k_hw_txrx_mode txmode,
3592 enum ath10k_mac_tx_path txpath,
3593 struct sk_buff *skb)
3594 {
3595 struct ath10k_htt *htt = &ar->htt;
3596 int ret = -EINVAL;
3597
3598 switch (txpath) {
3599 case ATH10K_MAC_TX_HTT:
3600 ret = ath10k_htt_tx(htt, txmode, skb);
3601 break;
3602 case ATH10K_MAC_TX_HTT_MGMT:
3603 ret = ath10k_htt_mgmt_tx(htt, skb);
3604 break;
3605 case ATH10K_MAC_TX_WMI_MGMT:
3606 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3607 break;
3608 case ATH10K_MAC_TX_UNKNOWN:
3609 WARN_ON_ONCE(1);
3610 ret = -EINVAL;
3611 break;
3612 }
3613
3614 if (ret) {
3615 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3616 ret);
3617 ieee80211_free_txskb(ar->hw, skb);
3618 }
3619
3620 return ret;
3621 }
3622
3623 /* This function consumes the sk_buff regardless of return value as far as
3624 * caller is concerned so no freeing is necessary afterwards.
3625 */
3626 static int ath10k_mac_tx(struct ath10k *ar,
3627 struct ieee80211_vif *vif,
3628 enum ath10k_hw_txrx_mode txmode,
3629 enum ath10k_mac_tx_path txpath,
3630 struct sk_buff *skb, bool noque_offchan)
3631 {
3632 struct ieee80211_hw *hw = ar->hw;
3633 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3634 int ret;
3635
3636 /* We should disable CCK RATE due to P2P */
3637 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3638 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3639
3640 switch (txmode) {
3641 case ATH10K_HW_TXRX_MGMT:
3642 case ATH10K_HW_TXRX_NATIVE_WIFI:
3643 ath10k_tx_h_nwifi(hw, skb);
3644 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3645 ath10k_tx_h_seq_no(vif, skb);
3646 break;
3647 case ATH10K_HW_TXRX_ETHERNET:
3648 ath10k_tx_h_8023(skb);
3649 break;
3650 case ATH10K_HW_TXRX_RAW:
3651 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3652 WARN_ON_ONCE(1);
3653 ieee80211_free_txskb(hw, skb);
3654 return -ENOTSUPP;
3655 }
3656 }
3657
3658 if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3659 if (!ath10k_mac_tx_frm_has_freq(ar)) {
3660 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
3661 skb, skb->len);
3662
3663 skb_queue_tail(&ar->offchan_tx_queue, skb);
3664 ieee80211_queue_work(hw, &ar->offchan_tx_work);
3665 return 0;
3666 }
3667 }
3668
3669 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3670 if (ret) {
3671 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3672 return ret;
3673 }
3674
3675 return 0;
3676 }
3677
3678 void ath10k_offchan_tx_purge(struct ath10k *ar)
3679 {
3680 struct sk_buff *skb;
3681
3682 for (;;) {
3683 skb = skb_dequeue(&ar->offchan_tx_queue);
3684 if (!skb)
3685 break;
3686
3687 ieee80211_free_txskb(ar->hw, skb);
3688 }
3689 }
3690
3691 void ath10k_offchan_tx_work(struct work_struct *work)
3692 {
3693 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3694 struct ath10k_peer *peer;
3695 struct ath10k_vif *arvif;
3696 enum ath10k_hw_txrx_mode txmode;
3697 enum ath10k_mac_tx_path txpath;
3698 struct ieee80211_hdr *hdr;
3699 struct ieee80211_vif *vif;
3700 struct ieee80211_sta *sta;
3701 struct sk_buff *skb;
3702 const u8 *peer_addr;
3703 int vdev_id;
3704 int ret;
3705 unsigned long time_left;
3706 bool tmp_peer_created = false;
3707
3708 /* FW requirement: We must create a peer before FW will send out
3709 * an offchannel frame. Otherwise the frame will be stuck and
3710 * never transmitted. We delete the peer upon tx completion.
3711 * It is unlikely that a peer for offchannel tx will already be
3712 * present. However it may be in some rare cases so account for that.
3713 * Otherwise we might remove a legitimate peer and break stuff.
3714 */
3715
3716 for (;;) {
3717 skb = skb_dequeue(&ar->offchan_tx_queue);
3718 if (!skb)
3719 break;
3720
3721 mutex_lock(&ar->conf_mutex);
3722
3723 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
3724 skb, skb->len);
3725
3726 hdr = (struct ieee80211_hdr *)skb->data;
3727 peer_addr = ieee80211_get_DA(hdr);
3728
3729 spin_lock_bh(&ar->data_lock);
3730 vdev_id = ar->scan.vdev_id;
3731 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3732 spin_unlock_bh(&ar->data_lock);
3733
3734 if (peer)
3735 /* FIXME: should this use ath10k_warn()? */
3736 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3737 peer_addr, vdev_id);
3738
3739 if (!peer) {
3740 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3741 peer_addr,
3742 WMI_PEER_TYPE_DEFAULT);
3743 if (ret)
3744 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3745 peer_addr, vdev_id, ret);
3746 tmp_peer_created = (ret == 0);
3747 }
3748
3749 spin_lock_bh(&ar->data_lock);
3750 reinit_completion(&ar->offchan_tx_completed);
3751 ar->offchan_tx_skb = skb;
3752 spin_unlock_bh(&ar->data_lock);
3753
3754 /* It's safe to access vif and sta - conf_mutex guarantees that
3755  * sta_state() and remove_interface() cannot run concurrently
3756  * with this offchannel worker.
3757 */
3758 arvif = ath10k_get_arvif(ar, vdev_id);
3759 if (arvif) {
3760 vif = arvif->vif;
3761 sta = ieee80211_find_sta(vif, peer_addr);
3762 } else {
3763 vif = NULL;
3764 sta = NULL;
3765 }
3766
3767 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3768 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3769
3770 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
3771 if (ret) {
3772 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3773 ret);
3774 /* not serious */
3775 }
3776
3777 time_left =
3778 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3779 if (time_left == 0)
3780 ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
3781 skb, skb->len);
3782
3783 if (!peer && tmp_peer_created) {
3784 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3785 if (ret)
3786 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3787 peer_addr, vdev_id, ret);
3788 }
3789
3790 mutex_unlock(&ar->conf_mutex);
3791 }
3792 }
3793
3794 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3795 {
3796 struct sk_buff *skb;
3797
3798 for (;;) {
3799 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3800 if (!skb)
3801 break;
3802
3803 ieee80211_free_txskb(ar->hw, skb);
3804 }
3805 }
3806
3807 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3808 {
3809 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3810 struct sk_buff *skb;
3811 int ret;
3812
3813 for (;;) {
3814 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3815 if (!skb)
3816 break;
3817
3818 ret = ath10k_wmi_mgmt_tx(ar, skb);
3819 if (ret) {
3820 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3821 ret);
3822 ieee80211_free_txskb(ar->hw, skb);
3823 }
3824 }
3825 }
3826
3827 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3828 {
3829 struct ath10k_txq *artxq;
3830
3831 if (!txq)
3832 return;
3833
3834 artxq = (void *)txq->drv_priv;
3835 INIT_LIST_HEAD(&artxq->list);
3836 }
3837
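/* Detach a txq that is going away: drop it from the pending-push list and
 * clear any references to it held by frames still pending in HTT so that
 * later tx completions cannot dereference freed mac80211 state.
 */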
3838 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3839 {
3840 struct ath10k_txq *artxq;
3841 struct ath10k_skb_cb *cb;
3842 struct sk_buff *msdu;
3843 int msdu_id;
3844
3845 if (!txq)
3846 return;
3847
3848 artxq = (void *)txq->drv_priv;
3849 spin_lock_bh(&ar->txqs_lock);
3850 if (!list_empty(&artxq->list))
3851 list_del_init(&artxq->list);
3852 spin_unlock_bh(&ar->txqs_lock);
3853
3854 spin_lock_bh(&ar->htt.tx_lock);
3855 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3856 cb = ATH10K_SKB_CB(msdu);
3857 if (cb->txq == txq)
3858 cb->txq = NULL;
3859 }
3860 spin_unlock_bh(&ar->htt.tx_lock);
3861 }
3862
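/* Map a firmware peer_id/tid pair back to its mac80211 txq. Returns NULL if
 * the peer is unknown or already marked as removed. Caller must hold
 * ar->data_lock.
 */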
3863 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3864 u16 peer_id,
3865 u8 tid)
3866 {
3867 struct ath10k_peer *peer;
3868
3869 lockdep_assert_held(&ar->data_lock);
3870
3871 peer = ar->peer_map[peer_id];
3872 if (!peer)
3873 return NULL;
3874
3875 if (peer->removed)
3876 return NULL;
3877
3878 if (peer->sta)
3879 return peer->sta->txq[tid];
3880 else if (peer->vif)
3881 return peer->vif->txq;
3882 else
3883 return NULL;
3884 }
3885
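/* Decide whether another frame may be pushed to the firmware for this txq,
 * based on the current push/pull mode and the global and per-queue limits on
 * frames in flight.
 */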
3886 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3887 struct ieee80211_txq *txq)
3888 {
3889 struct ath10k *ar = hw->priv;
3890 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3891
3892 /* No need to get locks */
3893
3894 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3895 return true;
3896
3897 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3898 return true;
3899
3900 if (artxq->num_fw_queued < artxq->num_push_allowed)
3901 return true;
3902
3903 return false;
3904 }
3905
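/* Dequeue a single frame from the given mac80211 txq and hand it to the
 * firmware. Returns the frame length on success, -ENOENT if the queue is
 * empty, or another negative errno on failure.
 */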
3906 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3907 struct ieee80211_txq *txq)
3908 {
3909 struct ath10k *ar = hw->priv;
3910 struct ath10k_htt *htt = &ar->htt;
3911 struct ath10k_txq *artxq = (void *)txq->drv_priv;
3912 struct ieee80211_vif *vif = txq->vif;
3913 struct ieee80211_sta *sta = txq->sta;
3914 enum ath10k_hw_txrx_mode txmode;
3915 enum ath10k_mac_tx_path txpath;
3916 struct sk_buff *skb;
3917 struct ieee80211_hdr *hdr;
3918 size_t skb_len;
3919 bool is_mgmt, is_presp;
3920 int ret;
3921
3922 spin_lock_bh(&ar->htt.tx_lock);
3923 ret = ath10k_htt_tx_inc_pending(htt);
3924 spin_unlock_bh(&ar->htt.tx_lock);
3925
3926 if (ret)
3927 return ret;
3928
3929 skb = ieee80211_tx_dequeue(hw, txq);
3930 if (!skb) {
3931 spin_lock_bh(&ar->htt.tx_lock);
3932 ath10k_htt_tx_dec_pending(htt);
3933 spin_unlock_bh(&ar->htt.tx_lock);
3934
3935 return -ENOENT;
3936 }
3937
3938 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3939
3940 skb_len = skb->len;
3941 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3942 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3943 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3944
3945 if (is_mgmt) {
3946 hdr = (struct ieee80211_hdr *)skb->data;
3947 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3948
3949 spin_lock_bh(&ar->htt.tx_lock);
3950 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3951
3952 if (ret) {
3953 ath10k_htt_tx_dec_pending(htt);
3954 spin_unlock_bh(&ar->htt.tx_lock);
3955 return ret;
3956 }
3957 spin_unlock_bh(&ar->htt.tx_lock);
3958 }
3959
3960 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
3961 if (unlikely(ret)) {
3962 ath10k_warn(ar, "failed to push frame: %d\n", ret);
3963
3964 spin_lock_bh(&ar->htt.tx_lock);
3965 ath10k_htt_tx_dec_pending(htt);
3966 if (is_mgmt)
3967 ath10k_htt_tx_mgmt_dec_pending(htt);
3968 spin_unlock_bh(&ar->htt.tx_lock);
3969
3970 return ret;
3971 }
3972
3973 spin_lock_bh(&ar->htt.tx_lock);
3974 artxq->num_fw_queued++;
3975 spin_unlock_bh(&ar->htt.tx_lock);
3976
3977 return skb_len;
3978 }
3979
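/* Service the driver's list of pending txqs in round-robin fashion, pushing
 * up to 16 frames per queue. Queues that did not run empty are re-queued at
 * the tail so no single sta/tid can starve the others.
 */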
3980 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3981 {
3982 struct ieee80211_hw *hw = ar->hw;
3983 struct ieee80211_txq *txq;
3984 struct ath10k_txq *artxq;
3985 struct ath10k_txq *last;
3986 int ret;
3987 int max;
3988
3989 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3990 return;
3991
3992 spin_lock_bh(&ar->txqs_lock);
3993 rcu_read_lock();
3994
3995 last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3996 while (!list_empty(&ar->txqs)) {
3997 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3998 txq = container_of((void *)artxq, struct ieee80211_txq,
3999 drv_priv);
4000
4001 /* Prevent an aggressive sta/tid from taking over the tx queue */
4002 max = 16;
4003 ret = 0;
4004 while (ath10k_mac_tx_can_push(hw, txq) && max--) {
4005 ret = ath10k_mac_tx_push_txq(hw, txq);
4006 if (ret < 0)
4007 break;
4008 }
4009
4010 list_del_init(&artxq->list);
4011 if (ret != -ENOENT)
4012 list_add_tail(&artxq->list, &ar->txqs);
4013
4014 ath10k_htt_tx_txq_update(hw, txq);
4015
4016 if (artxq == last || (ret < 0 && ret != -ENOENT))
4017 break;
4018 }
4019
4020 rcu_read_unlock();
4021 spin_unlock_bh(&ar->txqs_lock);
4022 }
4023 EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
4024
4025 /************/
4026 /* Scanning */
4027 /************/
4028
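/* Finalize the scan state machine: notify mac80211 that the scan completed
 * (or that the remain-on-channel period expired), purge queued offchannel
 * frames and signal scan completion. Caller must hold ar->data_lock.
 */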
4029 void __ath10k_scan_finish(struct ath10k *ar)
4030 {
4031 lockdep_assert_held(&ar->data_lock);
4032
4033 switch (ar->scan.state) {
4034 case ATH10K_SCAN_IDLE:
4035 break;
4036 case ATH10K_SCAN_RUNNING:
4037 case ATH10K_SCAN_ABORTING:
4038 if (!ar->scan.is_roc) {
4039 struct cfg80211_scan_info info = {
4040 .aborted = (ar->scan.state ==
4041 ATH10K_SCAN_ABORTING),
4042 };
4043
4044 ieee80211_scan_completed(ar->hw, &info);
4045 } else if (ar->scan.roc_notify) {
4046 ieee80211_remain_on_channel_expired(ar->hw);
4047 }
4048 /* fall through */
4049 case ATH10K_SCAN_STARTING:
4050 ar->scan.state = ATH10K_SCAN_IDLE;
4051 ar->scan_channel = NULL;
4052 ar->scan.roc_freq = 0;
4053 ath10k_offchan_tx_purge(ar);
4054 cancel_delayed_work(&ar->scan.timeout);
4055 complete(&ar->scan.completed);
4056 break;
4057 }
4058 }
4059
4060 void ath10k_scan_finish(struct ath10k *ar)
4061 {
4062 spin_lock_bh(&ar->data_lock);
4063 __ath10k_scan_finish(ar);
4064 spin_unlock_bh(&ar->data_lock);
4065 }
4066
4067 static int ath10k_scan_stop(struct ath10k *ar)
4068 {
4069 struct wmi_stop_scan_arg arg = {
4070 .req_id = 1, /* FIXME */
4071 .req_type = WMI_SCAN_STOP_ONE,
4072 .u.scan_id = ATH10K_SCAN_ID,
4073 };
4074 int ret;
4075
4076 lockdep_assert_held(&ar->conf_mutex);
4077
4078 ret = ath10k_wmi_stop_scan(ar, &arg);
4079 if (ret) {
4080 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
4081 goto out;
4082 }
4083
4084 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
4085 if (ret == 0) {
4086 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
4087 ret = -ETIMEDOUT;
4088 } else if (ret > 0) {
4089 ret = 0;
4090 }
4091
4092 out:
4093 /* Scan state should be updated upon scan completion but in case
4094  * firmware fails to deliver the event (for whatever reason) it is
4095  * desired to clean up scan state anyway. Firmware may have just
4096  * dropped the scan completion event delivery due to the transport pipe
4097  * being overflowed with data and/or it can recover on its own before
4098  * the next scan request is submitted.
4099 */
4100 spin_lock_bh(&ar->data_lock);
4101 if (ar->scan.state != ATH10K_SCAN_IDLE)
4102 __ath10k_scan_finish(ar);
4103 spin_unlock_bh(&ar->data_lock);
4104
4105 return ret;
4106 }
4107
4108 static void ath10k_scan_abort(struct ath10k *ar)
4109 {
4110 int ret;
4111
4112 lockdep_assert_held(&ar->conf_mutex);
4113
4114 spin_lock_bh(&ar->data_lock);
4115
4116 switch (ar->scan.state) {
4117 case ATH10K_SCAN_IDLE:
4118 /* This can happen if timeout worker kicked in and called
4119 * abortion while scan completion was being processed.
4120 */
4121 break;
4122 case ATH10K_SCAN_STARTING:
4123 case ATH10K_SCAN_ABORTING:
4124 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
4125 ath10k_scan_state_str(ar->scan.state),
4126 ar->scan.state);
4127 break;
4128 case ATH10K_SCAN_RUNNING:
4129 ar->scan.state = ATH10K_SCAN_ABORTING;
4130 spin_unlock_bh(&ar->data_lock);
4131
4132 ret = ath10k_scan_stop(ar);
4133 if (ret)
4134 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
4135
4136 spin_lock_bh(&ar->data_lock);
4137 break;
4138 }
4139
4140 spin_unlock_bh(&ar->data_lock);
4141 }
4142
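/* Scan watchdog: if the firmware does not report scan completion in time,
 * abort the scan to recover.
 */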
4143 void ath10k_scan_timeout_work(struct work_struct *work)
4144 {
4145 struct ath10k *ar = container_of(work, struct ath10k,
4146 scan.timeout.work);
4147
4148 mutex_lock(&ar->conf_mutex);
4149 ath10k_scan_abort(ar);
4150 mutex_unlock(&ar->conf_mutex);
4151 }
4152
4153 static int ath10k_start_scan(struct ath10k *ar,
4154 const struct wmi_start_scan_arg *arg)
4155 {
4156 int ret;
4157
4158 lockdep_assert_held(&ar->conf_mutex);
4159
4160 ret = ath10k_wmi_start_scan(ar, arg);
4161 if (ret)
4162 return ret;
4163
4164 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4165 if (ret == 0) {
4166 ret = ath10k_scan_stop(ar);
4167 if (ret)
4168 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4169
4170 return -ETIMEDOUT;
4171 }
4172
4173 /* If we failed to start the scan, return error code at
4174 * this point. This is probably due to some issue in the
4175 * firmware, but no need to wedge the driver due to that...
4176 */
4177 spin_lock_bh(&ar->data_lock);
4178 if (ar->scan.state == ATH10K_SCAN_IDLE) {
4179 spin_unlock_bh(&ar->data_lock);
4180 return -EINVAL;
4181 }
4182 spin_unlock_bh(&ar->data_lock);
4183
4184 return 0;
4185 }
4186
4187 /**********************/
4188 /* mac80211 callbacks */
4189 /**********************/
4190
4191 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4192 struct ieee80211_tx_control *control,
4193 struct sk_buff *skb)
4194 {
4195 struct ath10k *ar = hw->priv;
4196 struct ath10k_htt *htt = &ar->htt;
4197 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4198 struct ieee80211_vif *vif = info->control.vif;
4199 struct ieee80211_sta *sta = control->sta;
4200 struct ieee80211_txq *txq = NULL;
4201 struct ieee80211_hdr *hdr = (void *)skb->data;
4202 enum ath10k_hw_txrx_mode txmode;
4203 enum ath10k_mac_tx_path txpath;
4204 bool is_htt;
4205 bool is_mgmt;
4206 bool is_presp;
4207 int ret;
4208
4209 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4210
4211 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4212 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4213 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4214 txpath == ATH10K_MAC_TX_HTT_MGMT);
4215 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4216
4217 if (is_htt) {
4218 spin_lock_bh(&ar->htt.tx_lock);
4219 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4220
4221 ret = ath10k_htt_tx_inc_pending(htt);
4222 if (ret) {
4223 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4224 ret);
4225 spin_unlock_bh(&ar->htt.tx_lock);
4226 ieee80211_free_txskb(ar->hw, skb);
4227 return;
4228 }
4229
4230 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4231 if (ret) {
4232 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4233 ret);
4234 ath10k_htt_tx_dec_pending(htt);
4235 spin_unlock_bh(&ar->htt.tx_lock);
4236 ieee80211_free_txskb(ar->hw, skb);
4237 return;
4238 }
4239 spin_unlock_bh(&ar->htt.tx_lock);
4240 }
4241
4242 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
4243 if (ret) {
4244 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4245 if (is_htt) {
4246 spin_lock_bh(&ar->htt.tx_lock);
4247 ath10k_htt_tx_dec_pending(htt);
4248 if (is_mgmt)
4249 ath10k_htt_tx_mgmt_dec_pending(htt);
4250 spin_unlock_bh(&ar->htt.tx_lock);
4251 }
4252 return;
4253 }
4254 }
4255
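/* mac80211 .wake_tx_queue callback: enqueue the waking txq on the driver list
 * (if not already there) and service the list head, pushing up to 16 frames.
 */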
4256 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4257 struct ieee80211_txq *txq)
4258 {
4259 struct ath10k *ar = hw->priv;
4260 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4261 struct ieee80211_txq *f_txq;
4262 struct ath10k_txq *f_artxq;
4263 int ret = 0;
4264 int max = 16;
4265
4266 spin_lock_bh(&ar->txqs_lock);
4267 if (list_empty(&artxq->list))
4268 list_add_tail(&artxq->list, &ar->txqs);
4269
4270 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4271 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
4272 list_del_init(&f_artxq->list);
4273
4274 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
4275 ret = ath10k_mac_tx_push_txq(hw, f_txq);
4276 if (ret)
4277 break;
4278 }
4279 if (ret != -ENOENT)
4280 list_add_tail(&f_artxq->list, &ar->txqs);
4281 spin_unlock_bh(&ar->txqs_lock);
4282
4283 ath10k_htt_tx_txq_update(hw, f_txq);
4284 ath10k_htt_tx_txq_update(hw, txq);
4285 }
4286
4287 /* Must not be called with conf_mutex held as workers can use that also. */
4288 void ath10k_drain_tx(struct ath10k *ar)
4289 {
4290 /* make sure rcu-protected mac80211 tx path itself is drained */
4291 synchronize_net();
4292
4293 ath10k_offchan_tx_purge(ar);
4294 ath10k_mgmt_over_wmi_tx_purge(ar);
4295
4296 cancel_work_sync(&ar->offchan_tx_work);
4297 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4298 }
4299
4300 void ath10k_halt(struct ath10k *ar)
4301 {
4302 struct ath10k_vif *arvif;
4303
4304 lockdep_assert_held(&ar->conf_mutex);
4305
4306 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4307 ar->filter_flags = 0;
4308 ar->monitor = false;
4309 ar->monitor_arvif = NULL;
4310
4311 if (ar->monitor_started)
4312 ath10k_monitor_stop(ar);
4313
4314 ar->monitor_started = false;
4315 ar->tx_paused = 0;
4316
4317 ath10k_scan_finish(ar);
4318 ath10k_peer_cleanup_all(ar);
4319 ath10k_core_stop(ar);
4320 ath10k_hif_power_down(ar);
4321
4322 spin_lock_bh(&ar->data_lock);
4323 list_for_each_entry(arvif, &ar->arvifs, list)
4324 ath10k_mac_vif_beacon_cleanup(arvif);
4325 spin_unlock_bh(&ar->data_lock);
4326 }
4327
4328 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4329 {
4330 struct ath10k *ar = hw->priv;
4331
4332 mutex_lock(&ar->conf_mutex);
4333
4334 *tx_ant = ar->cfg_tx_chainmask;
4335 *rx_ant = ar->cfg_rx_chainmask;
4336
4337 mutex_unlock(&ar->conf_mutex);
4338
4339 return 0;
4340 }
4341
4342 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4343 {
4344 /* It is not clear that allowing gaps in the chainmask
4345  * is helpful. Probably it will not do what the user
4346  * is hoping for, so warn in that case.
4347 */
4348 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4349 return;
4350
4351 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4352 dbg, cm);
4353 }
4354
4355 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4356 {
4357 int nsts = ar->vht_cap_info;
4358
4359 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4360 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4361
4362 /* If the firmware does not report to the host the number of space-time
4363  * streams it supports, assume it supports up to 4 BF STS and return
4364  * the value encoded in the VHT capability field: (nsts - 1).
4365 */
4366 if (nsts == 0)
4367 return 3;
4368
4369 return nsts;
4370 }
4371
4372 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4373 {
4374 int sound_dim = ar->vht_cap_info;
4375
4376 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4377 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4378
4379 /* If the sounding dimension is not advertised by the firmware,
4380 * let's use a default value of 1
4381 */
4382 if (sound_dim == 0)
4383 return 1;
4384
4385 return sound_dim;
4386 }
4387
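/* Build the VHT capabilities advertised to mac80211 from the firmware
 * reported vht_cap_info, the configured tx chainmask and the hw params.
 */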
4388 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4389 {
4390 struct ieee80211_sta_vht_cap vht_cap = {0};
4391 struct ath10k_hw_params *hw = &ar->hw_params;
4392 u16 mcs_map;
4393 u32 val;
4394 int i;
4395
4396 vht_cap.vht_supported = 1;
4397 vht_cap.cap = ar->vht_cap_info;
4398
4399 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4400 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4401 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4402 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4403 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4404
4405 vht_cap.cap |= val;
4406 }
4407
4408 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4409 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4410 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4411 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4412 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4413
4414 vht_cap.cap |= val;
4415 }
4416
4417 /* Currently the firmware seems to be buggy, don't enable 80+80
4418 * mode until that's resolved.
4419 */
4420 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
4421 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
4422 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
4423
4424 mcs_map = 0;
4425 for (i = 0; i < 8; i++) {
4426 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4427 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4428 else
4429 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4430 }
4431
4432 if (ar->cfg_tx_chainmask <= 1)
4433 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4434
4435 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4436 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4437
4438 /* If we are supporting 160 MHz or 80+80, then the NIC may be able to do
4439  * a restricted NSS for 160 or 80+80 vs what it can do for 80 MHz. Give
4440  * user-space a clue if that is the case.
4441 */
4442 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
4443 (hw->vht160_mcs_rx_highest != 0 ||
4444 hw->vht160_mcs_tx_highest != 0)) {
4445 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
4446 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
4447 }
4448
4449 return vht_cap;
4450 }
4451
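/* Build the HT capabilities advertised to mac80211 from the firmware
 * reported ht_cap_info and the configured chainmasks.
 */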
4452 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4453 {
4454 int i;
4455 struct ieee80211_sta_ht_cap ht_cap = {0};
4456
4457 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4458 return ht_cap;
4459
4460 ht_cap.ht_supported = 1;
4461 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4462 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4463 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4464 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4465 ht_cap.cap |=
4466 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4467
4468 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4469 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4470
4471 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4472 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4473
4474 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4475 u32 smps;
4476
4477 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4478 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4479
4480 ht_cap.cap |= smps;
4481 }
4482
4483 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4484 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4485
4486 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4487 u32 stbc;
4488
4489 stbc = ar->ht_cap_info;
4490 stbc &= WMI_HT_CAP_RX_STBC;
4491 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4492 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4493 stbc &= IEEE80211_HT_CAP_RX_STBC;
4494
4495 ht_cap.cap |= stbc;
4496 }
4497
4498 if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4499 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4500
4501 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4502 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4503
4504 /* max AMSDU is implicitly taken from vht_cap_info */
4505 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4506 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4507
4508 for (i = 0; i < ar->num_rf_chains; i++) {
4509 if (ar->cfg_rx_chainmask & BIT(i))
4510 ht_cap.mcs.rx_mask[i] = 0xFF;
4511 }
4512
4513 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4514
4515 return ht_cap;
4516 }
4517
4518 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4519 {
4520 struct ieee80211_supported_band *band;
4521 struct ieee80211_sta_vht_cap vht_cap;
4522 struct ieee80211_sta_ht_cap ht_cap;
4523
4524 ht_cap = ath10k_get_ht_cap(ar);
4525 vht_cap = ath10k_create_vht_cap(ar);
4526
4527 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4528 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4529 band->ht_cap = ht_cap;
4530 }
4531 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4532 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4533 band->ht_cap = ht_cap;
4534 band->vht_cap = vht_cap;
4535 }
4536 }
4537
4538 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4539 {
4540 int ret;
4541
4542 lockdep_assert_held(&ar->conf_mutex);
4543
4544 ath10k_check_chain_mask(ar, tx_ant, "tx");
4545 ath10k_check_chain_mask(ar, rx_ant, "rx");
4546
4547 ar->cfg_tx_chainmask = tx_ant;
4548 ar->cfg_rx_chainmask = rx_ant;
4549
4550 if ((ar->state != ATH10K_STATE_ON) &&
4551 (ar->state != ATH10K_STATE_RESTARTED))
4552 return 0;
4553
4554 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4555 tx_ant);
4556 if (ret) {
4557 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4558 ret, tx_ant);
4559 return ret;
4560 }
4561
4562 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4563 rx_ant);
4564 if (ret) {
4565 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4566 ret, rx_ant);
4567 return ret;
4568 }
4569
4570 /* Reload HT/VHT capability */
4571 ath10k_mac_setup_ht_vht_cap(ar);
4572
4573 return 0;
4574 }
4575
4576 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4577 {
4578 struct ath10k *ar = hw->priv;
4579 int ret;
4580
4581 mutex_lock(&ar->conf_mutex);
4582 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4583 mutex_unlock(&ar->conf_mutex);
4584 return ret;
4585 }
4586
4587 static int ath10k_start(struct ieee80211_hw *hw)
4588 {
4589 struct ath10k *ar = hw->priv;
4590 u32 param;
4591 int ret = 0;
4592
4593 /*
4594 * This makes sense only when restarting hw. It is harmless to call
4595 * unconditionally. This is necessary to make sure no HTT/WMI tx
4596 * commands will be submitted while restarting.
4597 */
4598 ath10k_drain_tx(ar);
4599
4600 mutex_lock(&ar->conf_mutex);
4601
4602 switch (ar->state) {
4603 case ATH10K_STATE_OFF:
4604 ar->state = ATH10K_STATE_ON;
4605 break;
4606 case ATH10K_STATE_RESTARTING:
4607 ar->state = ATH10K_STATE_RESTARTED;
4608 break;
4609 case ATH10K_STATE_ON:
4610 case ATH10K_STATE_RESTARTED:
4611 case ATH10K_STATE_WEDGED:
4612 WARN_ON(1);
4613 ret = -EINVAL;
4614 goto err;
4615 case ATH10K_STATE_UTF:
4616 ret = -EBUSY;
4617 goto err;
4618 }
4619
4620 ret = ath10k_hif_power_up(ar);
4621 if (ret) {
4622 ath10k_err(ar, "Could not init hif: %d\n", ret);
4623 goto err_off;
4624 }
4625
4626 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4627 &ar->normal_mode_fw);
4628 if (ret) {
4629 ath10k_err(ar, "Could not init core: %d\n", ret);
4630 goto err_power_down;
4631 }
4632
4633 param = ar->wmi.pdev_param->pmf_qos;
4634 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4635 if (ret) {
4636 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4637 goto err_core_stop;
4638 }
4639
4640 param = ar->wmi.pdev_param->dynamic_bw;
4641 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4642 if (ret) {
4643 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4644 goto err_core_stop;
4645 }
4646
4647 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4648 ret = ath10k_wmi_adaptive_qcs(ar, true);
4649 if (ret) {
4650 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4651 ret);
4652 goto err_core_stop;
4653 }
4654 }
4655
4656 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4657 param = ar->wmi.pdev_param->burst_enable;
4658 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4659 if (ret) {
4660 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4661 goto err_core_stop;
4662 }
4663 }
4664
4665 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4666
4667 /*
4668  * By default the FW sets the ARP frames' AC to voice (6). In that case the
4669  * ARP exchange does not work properly with a UAPSD-enabled AP. ARP requests
4670  * which arrive with access category 0 are processed by the network stack
4671  * and sent back with access category 0, but the FW changes the access
4672  * category to 6. Setting the ARP frames' access category to best effort (0)
4673  * solves this problem.
4674 */
4675
4676 param = ar->wmi.pdev_param->arp_ac_override;
4677 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4678 if (ret) {
4679 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4680 ret);
4681 goto err_core_stop;
4682 }
4683
4684 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4685 ar->running_fw->fw_file.fw_features)) {
4686 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4687 WMI_CCA_DETECT_LEVEL_AUTO,
4688 WMI_CCA_DETECT_MARGIN_AUTO);
4689 if (ret) {
4690 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4691 ret);
4692 goto err_core_stop;
4693 }
4694 }
4695
4696 param = ar->wmi.pdev_param->ani_enable;
4697 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4698 if (ret) {
4699 ath10k_warn(ar, "failed to enable ani by default: %d\n",
4700 ret);
4701 goto err_core_stop;
4702 }
4703
4704 ar->ani_enabled = true;
4705
4706 if (ath10k_peer_stats_enabled(ar)) {
4707 param = ar->wmi.pdev_param->peer_stats_update_period;
4708 ret = ath10k_wmi_pdev_set_param(ar, param,
4709 PEER_DEFAULT_STATS_UPDATE_PERIOD);
4710 if (ret) {
4711 ath10k_warn(ar,
4712 "failed to set peer stats period : %d\n",
4713 ret);
4714 goto err_core_stop;
4715 }
4716 }
4717
4718 param = ar->wmi.pdev_param->enable_btcoex;
4719 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4720 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4721 ar->running_fw->fw_file.fw_features)) {
4722 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4723 if (ret) {
4724 ath10k_warn(ar,
4725 "failed to set btcoex param: %d\n", ret);
4726 goto err_core_stop;
4727 }
4728 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4729 }
4730
4731 ar->num_started_vdevs = 0;
4732 ath10k_regd_update(ar);
4733
4734 ath10k_spectral_start(ar);
4735 ath10k_thermal_set_throttling(ar);
4736
4737 mutex_unlock(&ar->conf_mutex);
4738 return 0;
4739
4740 err_core_stop:
4741 ath10k_core_stop(ar);
4742
4743 err_power_down:
4744 ath10k_hif_power_down(ar);
4745
4746 err_off:
4747 ar->state = ATH10K_STATE_OFF;
4748
4749 err:
4750 mutex_unlock(&ar->conf_mutex);
4751 return ret;
4752 }
4753
4754 static void ath10k_stop(struct ieee80211_hw *hw)
4755 {
4756 struct ath10k *ar = hw->priv;
4757
4758 ath10k_drain_tx(ar);
4759
4760 mutex_lock(&ar->conf_mutex);
4761 if (ar->state != ATH10K_STATE_OFF) {
4762 ath10k_halt(ar);
4763 ar->state = ATH10K_STATE_OFF;
4764 }
4765 mutex_unlock(&ar->conf_mutex);
4766
4767 cancel_work_sync(&ar->set_coverage_class_work);
4768 cancel_delayed_work_sync(&ar->scan.timeout);
4769 cancel_work_sync(&ar->restart_work);
4770 }
4771
4772 static int ath10k_config_ps(struct ath10k *ar)
4773 {
4774 struct ath10k_vif *arvif;
4775 int ret = 0;
4776
4777 lockdep_assert_held(&ar->conf_mutex);
4778
4779 list_for_each_entry(arvif, &ar->arvifs, list) {
4780 ret = ath10k_mac_vif_setup_ps(arvif);
4781 if (ret) {
4782 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4783 break;
4784 }
4785 }
4786
4787 return ret;
4788 }
4789
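/* Program the 2 GHz and 5 GHz pdev tx power limits. txpower is given in dBm
 * while the WMI limit parameters take half-dBm units, hence the
 * multiplication by two.
 */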
4790 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4791 {
4792 int ret;
4793 u32 param;
4794
4795 lockdep_assert_held(&ar->conf_mutex);
4796
4797 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4798
4799 param = ar->wmi.pdev_param->txpower_limit2g;
4800 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4801 if (ret) {
4802 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4803 txpower, ret);
4804 return ret;
4805 }
4806
4807 param = ar->wmi.pdev_param->txpower_limit5g;
4808 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4809 if (ret) {
4810 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4811 txpower, ret);
4812 return ret;
4813 }
4814
4815 return 0;
4816 }
4817
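/* Recompute the effective tx power limit as the minimum of all positive
 * per-vif tx power settings and apply it. Does nothing if no vif has a valid
 * tx power configured yet.
 */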
4818 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4819 {
4820 struct ath10k_vif *arvif;
4821 int ret, txpower = -1;
4822
4823 lockdep_assert_held(&ar->conf_mutex);
4824
4825 list_for_each_entry(arvif, &ar->arvifs, list) {
4826 if (arvif->txpower <= 0)
4827 continue;
4828
4829 if (txpower == -1)
4830 txpower = arvif->txpower;
4831 else
4832 txpower = min(txpower, arvif->txpower);
4833 }
4834
4835 if (txpower == -1)
4836 return 0;
4837
4838 ret = ath10k_mac_txpower_setup(ar, txpower);
4839 if (ret) {
4840 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4841 txpower, ret);
4842 return ret;
4843 }
4844
4845 return 0;
4846 }
4847
4848 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4849 {
4850 struct ath10k *ar = hw->priv;
4851 struct ieee80211_conf *conf = &hw->conf;
4852 int ret = 0;
4853
4854 mutex_lock(&ar->conf_mutex);
4855
4856 if (changed & IEEE80211_CONF_CHANGE_PS)
4857 ath10k_config_ps(ar);
4858
4859 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4860 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4861 ret = ath10k_monitor_recalc(ar);
4862 if (ret)
4863 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4864 }
4865
4866 mutex_unlock(&ar->conf_mutex);
4867 return ret;
4868 }
4869
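/* Derive the number of spatial streams from a chainmask, assuming the mask is
 * contiguous starting at bit 0 (0xf -> 4, 0x7 -> 3, 0x3 -> 2, otherwise 1).
 */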
4870 static u32 get_nss_from_chainmask(u16 chain_mask)
4871 {
4872 if ((chain_mask & 0xf) == 0xf)
4873 return 4;
4874 else if ((chain_mask & 0x7) == 0x7)
4875 return 3;
4876 else if ((chain_mask & 0x3) == 0x3)
4877 return 2;
4878 return 1;
4879 }
4880
4881 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4882 {
4883 u32 value = 0;
4884 struct ath10k *ar = arvif->ar;
4885 int nsts;
4886 int sound_dim;
4887
4888 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4889 return 0;
4890
4891 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4892 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4893 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4894 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4895
4896 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4897 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4898 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4899 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4900
4901 if (!value)
4902 return 0;
4903
4904 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4905 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4906
4907 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4908 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4909 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4910
4911 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4912 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4913
4914 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4915 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4916 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4917
4918 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4919 ar->wmi.vdev_param->txbf, value);
4920 }
4921
4922 /*
4923 * TODO:
4924 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4925 * because we will send mgmt frames without CCK. This requirement
4926 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4927 * in the TX packet.
4928 */
4929 static int ath10k_add_interface(struct ieee80211_hw *hw,
4930 struct ieee80211_vif *vif)
4931 {
4932 struct ath10k *ar = hw->priv;
4933 struct ath10k_vif *arvif = (void *)vif->drv_priv;
4934 struct ath10k_peer *peer;
4935 enum wmi_sta_powersave_param param;
4936 int ret = 0;
4937 u32 value;
4938 int bit;
4939 int i;
4940 u32 vdev_param;
4941
4942 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4943
4944 mutex_lock(&ar->conf_mutex);
4945
4946 memset(arvif, 0, sizeof(*arvif));
4947 ath10k_mac_txq_init(vif->txq);
4948
4949 arvif->ar = ar;
4950 arvif->vif = vif;
4951
4952 INIT_LIST_HEAD(&arvif->list);
4953 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4954 INIT_DELAYED_WORK(&arvif->connection_loss_work,
4955 ath10k_mac_vif_sta_connection_loss_work);
4956
4957 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4958 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4959 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4960 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4961 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4962 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4963 }
4964
4965 if (ar->num_peers >= ar->max_num_peers) {
4966 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4967 ret = -ENOBUFS;
4968 goto err;
4969 }
4970
4971 if (ar->free_vdev_map == 0) {
4972 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4973 ret = -EBUSY;
4974 goto err;
4975 }
4976 bit = __ffs64(ar->free_vdev_map);
4977
4978 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4979 bit, ar->free_vdev_map);
4980
4981 arvif->vdev_id = bit;
4982 arvif->vdev_subtype =
4983 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4984
4985 switch (vif->type) {
4986 case NL80211_IFTYPE_P2P_DEVICE:
4987 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4988 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4989 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4990 break;
4991 case NL80211_IFTYPE_UNSPECIFIED:
4992 case NL80211_IFTYPE_STATION:
4993 arvif->vdev_type = WMI_VDEV_TYPE_STA;
4994 if (vif->p2p)
4995 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4996 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4997 break;
4998 case NL80211_IFTYPE_ADHOC:
4999 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
5000 break;
5001 case NL80211_IFTYPE_MESH_POINT:
5002 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
5003 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5004 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
5005 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5006 ret = -EINVAL;
5007 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
5008 goto err;
5009 }
5010 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5011 break;
5012 case NL80211_IFTYPE_AP:
5013 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5014
5015 if (vif->p2p)
5016 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5017 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
5018 break;
5019 case NL80211_IFTYPE_MONITOR:
5020 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
5021 break;
5022 default:
5023 WARN_ON(1);
5024 break;
5025 }
5026
5027 /* Using vdev_id as queue number will make it very easy to do per-vif
5028 * tx queue locking. This shouldn't wrap due to interface combinations
5029  * but do a modulo for correctness' sake and to prevent using offchannel tx
5030 * queues for regular vif tx.
5031 */
5032 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5033 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
5034 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5035
5036 /* Some firmware revisions don't wait for beacon tx completion before
5037 * sending another SWBA event. This could lead to hardware using old
5038 * (freed) beacon data in some cases, e.g. tx credit starvation
5039 * combined with missed TBTT. This is very very rare.
5040 *
5041 * On non-IOMMU-enabled hosts this could be a possible security issue
5042 * because hw could beacon some random data on the air. On
5043 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
5044 * device would crash.
5045 *
5046  * Since there are no beacon tx completions (neither implicit nor explicit)
5047 * propagated to host the only workaround for this is to allocate a
5048 * DMA-coherent buffer for a lifetime of a vif and use it for all
5049 * beacon tx commands. Worst case for this approach is some beacons may
5050 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
5051 */
5052 if (vif->type == NL80211_IFTYPE_ADHOC ||
5053 vif->type == NL80211_IFTYPE_MESH_POINT ||
5054 vif->type == NL80211_IFTYPE_AP) {
5055 arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
5056 IEEE80211_MAX_FRAME_LEN,
5057 &arvif->beacon_paddr,
5058 GFP_ATOMIC);
5059 if (!arvif->beacon_buf) {
5060 ret = -ENOMEM;
5061 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
5062 ret);
5063 goto err;
5064 }
5065 }
5066 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
5067 arvif->nohwcrypt = true;
5068
5069 	if (arvif->nohwcrypt &&
5070 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		ret = -EINVAL;
5071 		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
5072 		goto err;
5073 	}
5074
5075 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
5076 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
5077 arvif->beacon_buf ? "single-buf" : "per-skb");
5078
5079 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
5080 arvif->vdev_subtype, vif->addr);
5081 if (ret) {
5082 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
5083 arvif->vdev_id, ret);
5084 goto err;
5085 }
5086
5087 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
5088 spin_lock_bh(&ar->data_lock);
5089 list_add(&arvif->list, &ar->arvifs);
5090 spin_unlock_bh(&ar->data_lock);
5091
5092 /* It makes no sense to have firmware do keepalives. mac80211 already
5093 * takes care of this with idle connection polling.
5094 */
5095 ret = ath10k_mac_vif_disable_keepalive(arvif);
5096 if (ret) {
5097 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
5098 arvif->vdev_id, ret);
5099 goto err_vdev_delete;
5100 }
5101
5102 arvif->def_wep_key_idx = -1;
5103
5104 vdev_param = ar->wmi.vdev_param->tx_encap_type;
5105 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5106 ATH10K_HW_TXRX_NATIVE_WIFI);
5107 /* 10.X firmware does not support this VDEV parameter. Do not warn */
5108 if (ret && ret != -EOPNOTSUPP) {
5109 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
5110 arvif->vdev_id, ret);
5111 goto err_vdev_delete;
5112 }
5113
5114 /* Configuring the number of spatial streams for a monitor interface causes
5115  * a target assert in qca9888 and qca6174.
5116 */
5117 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
5118 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
5119
5120 vdev_param = ar->wmi.vdev_param->nss;
5121 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5122 nss);
5123 if (ret) {
5124 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
5125 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
5126 ret);
5127 goto err_vdev_delete;
5128 }
5129 }
5130
5131 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5132 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5133 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
5134 vif->addr, WMI_PEER_TYPE_DEFAULT);
5135 if (ret) {
5136 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
5137 arvif->vdev_id, ret);
5138 goto err_vdev_delete;
5139 }
5140
5141 spin_lock_bh(&ar->data_lock);
5142
5143 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
5144 if (!peer) {
5145 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5146 vif->addr, arvif->vdev_id);
5147 spin_unlock_bh(&ar->data_lock);
5148 ret = -ENOENT;
5149 goto err_peer_delete;
5150 }
5151
5152 arvif->peer_id = find_first_bit(peer->peer_ids,
5153 ATH10K_MAX_NUM_PEER_IDS);
5154
5155 spin_unlock_bh(&ar->data_lock);
5156 } else {
5157 arvif->peer_id = HTT_INVALID_PEERID;
5158 }
5159
5160 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5161 ret = ath10k_mac_set_kickout(arvif);
5162 if (ret) {
5163 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5164 arvif->vdev_id, ret);
5165 goto err_peer_delete;
5166 }
5167 }
5168
5169 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5170 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5171 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5172 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5173 param, value);
5174 if (ret) {
5175 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5176 arvif->vdev_id, ret);
5177 goto err_peer_delete;
5178 }
5179
5180 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5181 if (ret) {
5182 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5183 arvif->vdev_id, ret);
5184 goto err_peer_delete;
5185 }
5186
5187 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5188 if (ret) {
5189 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5190 arvif->vdev_id, ret);
5191 goto err_peer_delete;
5192 }
5193 }
5194
5195 ret = ath10k_mac_set_txbf_conf(arvif);
5196 if (ret) {
5197 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5198 arvif->vdev_id, ret);
5199 goto err_peer_delete;
5200 }
5201
5202 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5203 if (ret) {
5204 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5205 arvif->vdev_id, ret);
5206 goto err_peer_delete;
5207 }
5208
5209 arvif->txpower = vif->bss_conf.txpower;
5210 ret = ath10k_mac_txpower_recalc(ar);
5211 if (ret) {
5212 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5213 goto err_peer_delete;
5214 }
5215
5216 if (vif->type == NL80211_IFTYPE_MONITOR) {
5217 ar->monitor_arvif = arvif;
5218 ret = ath10k_monitor_recalc(ar);
5219 if (ret) {
5220 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5221 goto err_peer_delete;
5222 }
5223 }
5224
5225 spin_lock_bh(&ar->htt.tx_lock);
5226 if (!ar->tx_paused)
5227 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5228 spin_unlock_bh(&ar->htt.tx_lock);
5229
5230 mutex_unlock(&ar->conf_mutex);
5231 return 0;
5232
5233 err_peer_delete:
5234 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5235 arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5236 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5237
5238 err_vdev_delete:
5239 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5240 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5241 spin_lock_bh(&ar->data_lock);
5242 list_del(&arvif->list);
5243 spin_unlock_bh(&ar->data_lock);
5244
5245 err:
5246 if (arvif->beacon_buf) {
5247 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5248 arvif->beacon_buf, arvif->beacon_paddr);
5249 arvif->beacon_buf = NULL;
5250 }
5251
5252 mutex_unlock(&ar->conf_mutex);
5253
5254 return ret;
5255 }
5256
5257 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5258 {
5259 int i;
5260
5261 for (i = 0; i < BITS_PER_LONG; i++)
5262 ath10k_mac_vif_tx_unlock(arvif, i);
5263 }
5264
5265 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5266 struct ieee80211_vif *vif)
5267 {
5268 struct ath10k *ar = hw->priv;
5269 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5270 struct ath10k_peer *peer;
5271 int ret;
5272 int i;
5273
5274 cancel_work_sync(&arvif->ap_csa_work);
5275 cancel_delayed_work_sync(&arvif->connection_loss_work);
5276
5277 mutex_lock(&ar->conf_mutex);
5278
5279 spin_lock_bh(&ar->data_lock);
5280 ath10k_mac_vif_beacon_cleanup(arvif);
5281 spin_unlock_bh(&ar->data_lock);
5282
5283 ret = ath10k_spectral_vif_stop(arvif);
5284 if (ret)
5285 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5286 arvif->vdev_id, ret);
5287
5288 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5289 spin_lock_bh(&ar->data_lock);
5290 list_del(&arvif->list);
5291 spin_unlock_bh(&ar->data_lock);
5292
5293 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5294 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5295 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5296 vif->addr);
5297 if (ret)
5298 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5299 arvif->vdev_id, ret);
5300
5301 kfree(arvif->u.ap.noa_data);
5302 }
5303
5304 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5305 arvif->vdev_id);
5306
5307 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5308 if (ret)
5309 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5310 arvif->vdev_id, ret);
5311
5312 /* Some firmware revisions don't notify host about self-peer removal
5313 * until after associated vdev is deleted.
5314 */
5315 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5316 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5317 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5318 vif->addr);
5319 if (ret)
5320 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5321 arvif->vdev_id, ret);
5322
5323 spin_lock_bh(&ar->data_lock);
5324 ar->num_peers--;
5325 spin_unlock_bh(&ar->data_lock);
5326 }
5327
5328 spin_lock_bh(&ar->data_lock);
5329 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5330 peer = ar->peer_map[i];
5331 if (!peer)
5332 continue;
5333
5334 if (peer->vif == vif) {
5335 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5336 vif->addr, arvif->vdev_id);
5337 peer->vif = NULL;
5338 }
5339 }
5340 spin_unlock_bh(&ar->data_lock);
5341
5342 ath10k_peer_cleanup(ar, arvif->vdev_id);
5343 ath10k_mac_txq_unref(ar, vif->txq);
5344
5345 if (vif->type == NL80211_IFTYPE_MONITOR) {
5346 ar->monitor_arvif = NULL;
5347 ret = ath10k_monitor_recalc(ar);
5348 if (ret)
5349 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5350 }
5351
5352 ret = ath10k_mac_txpower_recalc(ar);
5353 if (ret)
5354 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5355
5356 spin_lock_bh(&ar->htt.tx_lock);
5357 ath10k_mac_vif_tx_unlock_all(arvif);
5358 spin_unlock_bh(&ar->htt.tx_lock);
5359
5360 ath10k_mac_txq_unref(ar, vif->txq);
5361
5362 mutex_unlock(&ar->conf_mutex);
5363 }
5364
5365 /*
5366 * FIXME: Has to be verified.
5367 */
5368 #define SUPPORTED_FILTERS \
5369 (FIF_ALLMULTI | \
5370 FIF_CONTROL | \
5371 FIF_PSPOLL | \
5372 FIF_OTHER_BSS | \
5373 FIF_BCN_PRBRESP_PROMISC | \
5374 FIF_PROBE_REQ | \
5375 FIF_FCSFAIL)
5376
5377 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5378 unsigned int changed_flags,
5379 unsigned int *total_flags,
5380 u64 multicast)
5381 {
5382 struct ath10k *ar = hw->priv;
5383 int ret;
5384
5385 mutex_lock(&ar->conf_mutex);
5386
5387 changed_flags &= SUPPORTED_FILTERS;
5388 *total_flags &= SUPPORTED_FILTERS;
5389 ar->filter_flags = *total_flags;
5390
5391 ret = ath10k_monitor_recalc(ar);
5392 if (ret)
5393 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5394
5395 mutex_unlock(&ar->conf_mutex);
5396 }
5397
5398 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5399 struct ieee80211_vif *vif,
5400 struct ieee80211_bss_conf *info,
5401 u32 changed)
5402 {
5403 struct ath10k *ar = hw->priv;
5404 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5405 int ret = 0;
5406 u32 vdev_param, pdev_param, slottime, preamble;
5407
5408 mutex_lock(&ar->conf_mutex);
5409
5410 if (changed & BSS_CHANGED_IBSS)
5411 ath10k_control_ibss(arvif, info, vif->addr);
5412
5413 if (changed & BSS_CHANGED_BEACON_INT) {
5414 arvif->beacon_interval = info->beacon_int;
5415 vdev_param = ar->wmi.vdev_param->beacon_interval;
5416 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5417 arvif->beacon_interval);
5418 ath10k_dbg(ar, ATH10K_DBG_MAC,
5419 "mac vdev %d beacon_interval %d\n",
5420 arvif->vdev_id, arvif->beacon_interval);
5421
5422 if (ret)
5423 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5424 arvif->vdev_id, ret);
5425 }
5426
5427 if (changed & BSS_CHANGED_BEACON) {
5428 ath10k_dbg(ar, ATH10K_DBG_MAC,
5429 "vdev %d set beacon tx mode to staggered\n",
5430 arvif->vdev_id);
5431
5432 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5433 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5434 WMI_BEACON_STAGGERED_MODE);
5435 if (ret)
5436 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5437 arvif->vdev_id, ret);
5438
5439 ret = ath10k_mac_setup_bcn_tmpl(arvif);
5440 if (ret)
5441 ath10k_warn(ar, "failed to update beacon template: %d\n",
5442 ret);
5443
5444 if (ieee80211_vif_is_mesh(vif)) {
5445 /* mesh doesn't use SSID but firmware needs it */
5446 strncpy(arvif->u.ap.ssid, "mesh",
5447 sizeof(arvif->u.ap.ssid));
5448 arvif->u.ap.ssid_len = 4;
5449 }
5450 }
5451
5452 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5453 ret = ath10k_mac_setup_prb_tmpl(arvif);
5454 if (ret)
5455 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5456 arvif->vdev_id, ret);
5457 }
5458
5459 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5460 arvif->dtim_period = info->dtim_period;
5461
5462 ath10k_dbg(ar, ATH10K_DBG_MAC,
5463 "mac vdev %d dtim_period %d\n",
5464 arvif->vdev_id, arvif->dtim_period);
5465
5466 vdev_param = ar->wmi.vdev_param->dtim_period;
5467 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5468 arvif->dtim_period);
5469 if (ret)
5470 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5471 arvif->vdev_id, ret);
5472 }
5473
5474 if (changed & BSS_CHANGED_SSID &&
5475 vif->type == NL80211_IFTYPE_AP) {
5476 arvif->u.ap.ssid_len = info->ssid_len;
5477 if (info->ssid_len)
5478 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5479 arvif->u.ap.hidden_ssid = info->hidden_ssid;
5480 }
5481
5482 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5483 ether_addr_copy(arvif->bssid, info->bssid);
5484
5485 if (changed & BSS_CHANGED_BEACON_ENABLED)
5486 ath10k_control_beaconing(arvif, info);
5487
5488 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5489 arvif->use_cts_prot = info->use_cts_prot;
5490
5491 ret = ath10k_recalc_rtscts_prot(arvif);
5492 if (ret)
5493 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5494 arvif->vdev_id, ret);
5495
5496 if (ath10k_mac_can_set_cts_prot(arvif)) {
5497 ret = ath10k_mac_set_cts_prot(arvif);
5498 if (ret)
5499 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
5500 arvif->vdev_id, ret);
5501 }
5502 }
5503
5504 if (changed & BSS_CHANGED_ERP_SLOT) {
5505 if (info->use_short_slot)
5506 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5507
5508 else
5509 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5510
5511 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5512 arvif->vdev_id, slottime);
5513
5514 vdev_param = ar->wmi.vdev_param->slot_time;
5515 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5516 slottime);
5517 if (ret)
5518 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5519 arvif->vdev_id, ret);
5520 }
5521
5522 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5523 if (info->use_short_preamble)
5524 preamble = WMI_VDEV_PREAMBLE_SHORT;
5525 else
5526 preamble = WMI_VDEV_PREAMBLE_LONG;
5527
5528 ath10k_dbg(ar, ATH10K_DBG_MAC,
5529 	   "mac vdev %d preamble %d\n",
5530 arvif->vdev_id, preamble);
5531
5532 vdev_param = ar->wmi.vdev_param->preamble;
5533 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5534 preamble);
5535 if (ret)
5536 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5537 arvif->vdev_id, ret);
5538 }
5539
5540 if (changed & BSS_CHANGED_ASSOC) {
5541 if (info->assoc) {
5542 /* Workaround: Make sure monitor vdev is not running
5543 * when associating to prevent some firmware revisions
5544 * (e.g. 10.1 and 10.2) from crashing.
5545 */
5546 if (ar->monitor_started)
5547 ath10k_monitor_stop(ar);
5548 ath10k_bss_assoc(hw, vif, info);
5549 ath10k_monitor_recalc(ar);
5550 } else {
5551 ath10k_bss_disassoc(hw, vif);
5552 }
5553 }
5554
5555 if (changed & BSS_CHANGED_TXPOWER) {
5556 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5557 arvif->vdev_id, info->txpower);
5558
5559 arvif->txpower = info->txpower;
5560 ret = ath10k_mac_txpower_recalc(ar);
5561 if (ret)
5562 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5563 }
5564
5565 if (changed & BSS_CHANGED_PS) {
5566 arvif->ps = vif->bss_conf.ps;
5567
5568 ret = ath10k_config_ps(ar);
5569 if (ret)
5570 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5571 arvif->vdev_id, ret);
5572 }
5573
5574 mutex_unlock(&ar->conf_mutex);
5575 }
5576
5577 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
5578 {
5579 struct ath10k *ar = hw->priv;
5580
5581 /* This function should never be called if setting the coverage class
5582 * is not supported on this hardware.
5583 */
5584 if (!ar->hw_params.hw_ops->set_coverage_class) {
5585 WARN_ON_ONCE(1);
5586 return;
5587 }
5588 ar->hw_params.hw_ops->set_coverage_class(ar, value);
5589 }
5590
5591 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5592 struct ieee80211_vif *vif,
5593 struct ieee80211_scan_request *hw_req)
5594 {
5595 struct ath10k *ar = hw->priv;
5596 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5597 struct cfg80211_scan_request *req = &hw_req->req;
5598 struct wmi_start_scan_arg arg;
5599 int ret = 0;
5600 int i;
5601
5602 mutex_lock(&ar->conf_mutex);
5603
5604 spin_lock_bh(&ar->data_lock);
5605 switch (ar->scan.state) {
5606 case ATH10K_SCAN_IDLE:
5607 reinit_completion(&ar->scan.started);
5608 reinit_completion(&ar->scan.completed);
5609 ar->scan.state = ATH10K_SCAN_STARTING;
5610 ar->scan.is_roc = false;
5611 ar->scan.vdev_id = arvif->vdev_id;
5612 ret = 0;
5613 break;
5614 case ATH10K_SCAN_STARTING:
5615 case ATH10K_SCAN_RUNNING:
5616 case ATH10K_SCAN_ABORTING:
5617 ret = -EBUSY;
5618 break;
5619 }
5620 spin_unlock_bh(&ar->data_lock);
5621
5622 if (ret)
5623 goto exit;
5624
5625 memset(&arg, 0, sizeof(arg));
5626 ath10k_wmi_start_scan_init(ar, &arg);
5627 arg.vdev_id = arvif->vdev_id;
5628 arg.scan_id = ATH10K_SCAN_ID;
5629
5630 if (req->ie_len) {
5631 arg.ie_len = req->ie_len;
5632 memcpy(arg.ie, req->ie, arg.ie_len);
5633 }
5634
5635 if (req->n_ssids) {
5636 arg.n_ssids = req->n_ssids;
5637 for (i = 0; i < arg.n_ssids; i++) {
5638 arg.ssids[i].len = req->ssids[i].ssid_len;
5639 arg.ssids[i].ssid = req->ssids[i].ssid;
5640 }
5641 } else {
5642 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5643 }
5644
5645 if (req->n_channels) {
5646 arg.n_channels = req->n_channels;
5647 for (i = 0; i < arg.n_channels; i++)
5648 arg.channels[i] = req->channels[i]->center_freq;
5649 }
5650
5651 ret = ath10k_start_scan(ar, &arg);
5652 if (ret) {
5653 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5654 spin_lock_bh(&ar->data_lock);
5655 ar->scan.state = ATH10K_SCAN_IDLE;
5656 spin_unlock_bh(&ar->data_lock);
5657 }
5658
5659 /* Add a 200ms margin to account for event/command processing */
5660 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5661 msecs_to_jiffies(arg.max_scan_time +
5662 200));
5663
5664 exit:
5665 mutex_unlock(&ar->conf_mutex);
5666 return ret;
5667 }
5668
5669 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5670 struct ieee80211_vif *vif)
5671 {
5672 struct ath10k *ar = hw->priv;
5673
5674 mutex_lock(&ar->conf_mutex);
5675 ath10k_scan_abort(ar);
5676 mutex_unlock(&ar->conf_mutex);
5677
5678 cancel_delayed_work_sync(&ar->scan.timeout);
5679 }
5680
5681 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5682 struct ath10k_vif *arvif,
5683 enum set_key_cmd cmd,
5684 struct ieee80211_key_conf *key)
5685 {
5686 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5687 int ret;
5688
5689 /* 10.1 firmware branch requires default key index to be set to group
5690 * key index after installing it. Otherwise FW/HW Txes corrupted
5691 * frames with multi-vif APs. This is not required for main firmware
5692 * branch (e.g. 636).
5693 *
5694 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5695 *
5696 * FIXME: It remains unknown if this is required for multi-vif STA
5697 * interfaces on 10.1.
5698 */
5699
5700 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5701 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5702 return;
5703
5704 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5705 return;
5706
5707 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5708 return;
5709
5710 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5711 return;
5712
5713 if (cmd != SET_KEY)
5714 return;
5715
5716 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5717 key->keyidx);
5718 if (ret)
5719 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5720 arvif->vdev_id, ret);
5721 }
5722
5723 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5724 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5725 struct ieee80211_key_conf *key)
5726 {
5727 struct ath10k *ar = hw->priv;
5728 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5729 struct ath10k_peer *peer;
5730 const u8 *peer_addr;
5731 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5732 key->cipher == WLAN_CIPHER_SUITE_WEP104;
5733 int ret = 0;
5734 int ret2;
5735 u32 flags = 0;
5736 u32 flags2;
5737
5738 /* this one needs to be done in software */
5739 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5740 return 1;
5741
5742 if (arvif->nohwcrypt)
5743 return 1;
5744
5745 if (key->keyidx > WMI_MAX_KEY_INDEX)
5746 return -ENOSPC;
5747
5748 mutex_lock(&ar->conf_mutex);
5749
5750 if (sta)
5751 peer_addr = sta->addr;
5752 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5753 peer_addr = vif->bss_conf.bssid;
5754 else
5755 peer_addr = vif->addr;
5756
5757 key->hw_key_idx = key->keyidx;
5758
5759 if (is_wep) {
5760 if (cmd == SET_KEY)
5761 arvif->wep_keys[key->keyidx] = key;
5762 else
5763 arvif->wep_keys[key->keyidx] = NULL;
5764 }
5765
5766 /* The peer should not disappear mid-way (unless FW goes awry) since
5767 * we already hold conf_mutex. We just make sure it's there now.
5768 */
5769 spin_lock_bh(&ar->data_lock);
5770 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5771 spin_unlock_bh(&ar->data_lock);
5772
5773 if (!peer) {
5774 if (cmd == SET_KEY) {
5775 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5776 peer_addr);
5777 ret = -EOPNOTSUPP;
5778 goto exit;
5779 } else {
5780 /* if the peer doesn't exist there is no key to disable anymore */
5781 goto exit;
5782 }
5783 }
5784
5785 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5786 flags |= WMI_KEY_PAIRWISE;
5787 else
5788 flags |= WMI_KEY_GROUP;
5789
5790 if (is_wep) {
5791 if (cmd == DISABLE_KEY)
5792 ath10k_clear_vdev_key(arvif, key);
5793
5794 /* When WEP keys are uploaded it's possible that there are
5795 * stations associated already (e.g. when merging) without any
5796 * keys. Static WEP needs an explicit per-peer key upload.
5797 */
5798 if (vif->type == NL80211_IFTYPE_ADHOC &&
5799 cmd == SET_KEY)
5800 ath10k_mac_vif_update_wep_key(arvif, key);
5801
5802 /* 802.1x never sets the def_wep_key_idx so each set_key()
5803 * call changes default tx key.
5804 *
5805 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5806 * after first set_key().
5807 */
5808 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5809 flags |= WMI_KEY_TX_USAGE;
5810 }
5811
5812 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5813 if (ret) {
5814 WARN_ON(ret > 0);
5815 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5816 arvif->vdev_id, peer_addr, ret);
5817 goto exit;
5818 }
5819
5820 /* mac80211 sets static WEP keys as groupwise while firmware requires
5821 * them to be installed twice as both pairwise and groupwise.
5822 */
5823 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5824 flags2 = flags;
5825 flags2 &= ~WMI_KEY_GROUP;
5826 flags2 |= WMI_KEY_PAIRWISE;
5827
5828 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5829 if (ret) {
5830 WARN_ON(ret > 0);
5831 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5832 arvif->vdev_id, peer_addr, ret);
5833 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5834 peer_addr, flags);
5835 if (ret2) {
5836 WARN_ON(ret2 > 0);
5837 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5838 arvif->vdev_id, peer_addr, ret2);
5839 }
5840 goto exit;
5841 }
5842 }
5843
5844 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5845
5846 spin_lock_bh(&ar->data_lock);
5847 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5848 if (peer && cmd == SET_KEY)
5849 peer->keys[key->keyidx] = key;
5850 else if (peer && cmd == DISABLE_KEY)
5851 peer->keys[key->keyidx] = NULL;
5852 else if (peer == NULL)
5853 /* impossible unless FW goes crazy */
5854 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5855 spin_unlock_bh(&ar->data_lock);
5856
5857 exit:
5858 mutex_unlock(&ar->conf_mutex);
5859 return ret;
5860 }
5861
5862 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5863 struct ieee80211_vif *vif,
5864 int keyidx)
5865 {
5866 struct ath10k *ar = hw->priv;
5867 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5868 int ret;
5869
5870 mutex_lock(&arvif->ar->conf_mutex);
5871
5872 if (arvif->ar->state != ATH10K_STATE_ON)
5873 goto unlock;
5874
5875 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5876 arvif->vdev_id, keyidx);
5877
5878 ret = ath10k_wmi_vdev_set_param(arvif->ar,
5879 arvif->vdev_id,
5880 arvif->ar->wmi.vdev_param->def_keyid,
5881 keyidx);
5882
5883 if (ret) {
5884 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5885 arvif->vdev_id,
5886 ret);
5887 goto unlock;
5888 }
5889
5890 arvif->def_wep_key_idx = keyidx;
5891
5892 unlock:
5893 mutex_unlock(&arvif->ar->conf_mutex);
5894 }
5895
5896 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5897 {
5898 struct ath10k *ar;
5899 struct ath10k_vif *arvif;
5900 struct ath10k_sta *arsta;
5901 struct ieee80211_sta *sta;
5902 struct cfg80211_chan_def def;
5903 enum nl80211_band band;
5904 const u8 *ht_mcs_mask;
5905 const u16 *vht_mcs_mask;
5906 u32 changed, bw, nss, smps;
5907 int err;
5908
5909 arsta = container_of(wk, struct ath10k_sta, update_wk);
5910 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5911 arvif = arsta->arvif;
5912 ar = arvif->ar;
5913
5914 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5915 return;
5916
5917 band = def.chan->band;
5918 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5919 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5920
5921 spin_lock_bh(&ar->data_lock);
5922
5923 changed = arsta->changed;
5924 arsta->changed = 0;
5925
5926 bw = arsta->bw;
5927 nss = arsta->nss;
5928 smps = arsta->smps;
5929
5930 spin_unlock_bh(&ar->data_lock);
5931
5932 mutex_lock(&ar->conf_mutex);
5933
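/* Clamp NSS to at least one stream and to the maximum allowed by the
 * currently configured HT/VHT bitrate masks before pushing it to the
 * firmware below.
 */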
5934 nss = max_t(u32, 1, nss);
5935 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5936 ath10k_mac_max_vht_nss(vht_mcs_mask)));
5937
5938 if (changed & IEEE80211_RC_BW_CHANGED) {
5939 enum wmi_phy_mode mode;
5940
5941 mode = chan_to_phymode(&def);
5942 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
5943 sta->addr, bw, mode);
5944
5945 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5946 WMI_PEER_PHYMODE, mode);
5947 if (err) {
5948 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
5949 sta->addr, mode, err);
5950 goto exit;
5951 }
5952
5953 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5954 WMI_PEER_CHAN_WIDTH, bw);
5955 if (err)
5956 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5957 sta->addr, bw, err);
5958 }
5959
5960 if (changed & IEEE80211_RC_NSS_CHANGED) {
5961 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5962 sta->addr, nss);
5963
5964 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5965 WMI_PEER_NSS, nss);
5966 if (err)
5967 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5968 sta->addr, nss, err);
5969 }
5970
5971 if (changed & IEEE80211_RC_SMPS_CHANGED) {
5972 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5973 sta->addr, smps);
5974
5975 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5976 WMI_PEER_SMPS_STATE, smps);
5977 if (err)
5978 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5979 sta->addr, smps, err);
5980 }
5981
5982 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
5983 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
5984 sta->addr);
5985
5986 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5987 if (err)
5988 ath10k_warn(ar, "failed to reassociate station: %pM\n",
5989 sta->addr);
5990 }
5991
5992 exit:
5993 mutex_unlock(&ar->conf_mutex);
5994 }
5995
5996 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5997 struct ieee80211_sta *sta)
5998 {
5999 struct ath10k *ar = arvif->ar;
6000
6001 lockdep_assert_held(&ar->conf_mutex);
6002
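/* Stations on a STA vdev are only counted against the firmware station
 * limit when they are TDLS peers; the regular non-TDLS peer of a STA
 * vdev is not counted here.
 */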
6003 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6004 return 0;
6005
6006 if (ar->num_stations >= ar->max_num_stations)
6007 return -ENOBUFS;
6008
6009 ar->num_stations++;
6010
6011 return 0;
6012 }
6013
6014 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
6015 struct ieee80211_sta *sta)
6016 {
6017 struct ath10k *ar = arvif->ar;
6018
6019 lockdep_assert_held(&ar->conf_mutex);
6020
6021 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6022 return;
6023
6024 ar->num_stations--;
6025 }
6026
6027 struct ath10k_mac_tdls_iter_data {
6028 u32 num_tdls_stations;
6029 struct ieee80211_vif *curr_vif;
6030 };
6031
6032 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
6033 struct ieee80211_sta *sta)
6034 {
6035 struct ath10k_mac_tdls_iter_data *iter_data = data;
6036 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6037 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
6038
6039 if (sta->tdls && sta_vif == iter_data->curr_vif)
6040 iter_data->num_tdls_stations++;
6041 }
6042
6043 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
6044 struct ieee80211_vif *vif)
6045 {
6046 struct ath10k_mac_tdls_iter_data data = {};
6047
6048 data.curr_vif = vif;
6049
6050 ieee80211_iterate_stations_atomic(hw,
6051 ath10k_mac_tdls_vif_stations_count_iter,
6052 &data);
6053 return data.num_tdls_stations;
6054 }
6055
6056 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
6057 struct ieee80211_vif *vif)
6058 {
6059 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6060 int *num_tdls_vifs = data;
6061
6062 if (vif->type != NL80211_IFTYPE_STATION)
6063 return;
6064
6065 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
6066 (*num_tdls_vifs)++;
6067 }
6068
6069 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
6070 {
6071 int num_tdls_vifs = 0;
6072
6073 ieee80211_iterate_active_interfaces_atomic(hw,
6074 IEEE80211_IFACE_ITER_NORMAL,
6075 ath10k_mac_tdls_vifs_count_iter,
6076 &num_tdls_vifs);
6077 return num_tdls_vifs;
6078 }
6079
6080 static int ath10k_sta_state(struct ieee80211_hw *hw,
6081 struct ieee80211_vif *vif,
6082 struct ieee80211_sta *sta,
6083 enum ieee80211_sta_state old_state,
6084 enum ieee80211_sta_state new_state)
6085 {
6086 struct ath10k *ar = hw->priv;
6087 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6088 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6089 struct ath10k_peer *peer;
6090 int ret = 0;
6091 int i;
6092
6093 if (old_state == IEEE80211_STA_NOTEXIST &&
6094 new_state == IEEE80211_STA_NONE) {
6095 memset(arsta, 0, sizeof(*arsta));
6096 arsta->arvif = arvif;
6097 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
6098
6099 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6100 ath10k_mac_txq_init(sta->txq[i]);
6101 }
6102
6103 /* cancel must be done outside the mutex to avoid deadlock */
6104 if ((old_state == IEEE80211_STA_NONE &&
6105 new_state == IEEE80211_STA_NOTEXIST))
6106 cancel_work_sync(&arsta->update_wk);
6107
6108 mutex_lock(&ar->conf_mutex);
6109
6110 if (old_state == IEEE80211_STA_NOTEXIST &&
6111 new_state == IEEE80211_STA_NONE) {
6112 /*
6113 * New station addition.
6114 */
6115 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
6116 u32 num_tdls_stations;
6117 u32 num_tdls_vifs;
6118
6119 ath10k_dbg(ar, ATH10K_DBG_MAC,
6120 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
6121 arvif->vdev_id, sta->addr,
6122 ar->num_stations + 1, ar->max_num_stations,
6123 ar->num_peers + 1, ar->max_num_peers);
6124
6125 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
6126 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
6127
6128 if (sta->tdls) {
6129 if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
6130 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
6131 arvif->vdev_id,
6132 ar->max_num_tdls_vdevs);
6133 ret = -ELNRNG;
6134 goto exit;
6135 }
6136 peer_type = WMI_PEER_TYPE_TDLS;
6137 }
6138
6139 ret = ath10k_mac_inc_num_stations(arvif, sta);
6140 if (ret) {
6141 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
6142 ar->max_num_stations);
6143 goto exit;
6144 }
6145
6146 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
6147 sta->addr, peer_type);
6148 if (ret) {
6149 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
6150 sta->addr, arvif->vdev_id, ret);
6151 ath10k_mac_dec_num_stations(arvif, sta);
6152 goto exit;
6153 }
6154
6155 spin_lock_bh(&ar->data_lock);
6156
6157 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
6158 if (!peer) {
6159 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
6160 vif->addr, arvif->vdev_id);
6161 spin_unlock_bh(&ar->data_lock);
6162 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6163 ath10k_mac_dec_num_stations(arvif, sta);
6164 ret = -ENOENT;
6165 goto exit;
6166 }
6167
6168 arsta->peer_id = find_first_bit(peer->peer_ids,
6169 ATH10K_MAX_NUM_PEER_IDS);
6170
6171 spin_unlock_bh(&ar->data_lock);
6172
6173 if (!sta->tdls)
6174 goto exit;
6175
6176 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6177 WMI_TDLS_ENABLE_ACTIVE);
6178 if (ret) {
6179 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6180 arvif->vdev_id, ret);
6181 ath10k_peer_delete(ar, arvif->vdev_id,
6182 sta->addr);
6183 ath10k_mac_dec_num_stations(arvif, sta);
6184 goto exit;
6185 }
6186
6187 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6188 WMI_TDLS_PEER_STATE_PEERING);
6189 if (ret) {
6190 ath10k_warn(ar,
6191 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6192 sta->addr, arvif->vdev_id, ret);
6193 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6194 ath10k_mac_dec_num_stations(arvif, sta);
6195
6196 if (num_tdls_stations != 0)
6197 goto exit;
6198 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6199 WMI_TDLS_DISABLE);
6200 }
6201 } else if ((old_state == IEEE80211_STA_NONE &&
6202 new_state == IEEE80211_STA_NOTEXIST)) {
6203 /*
6204 * Existing station deletion.
6205 */
6206 ath10k_dbg(ar, ATH10K_DBG_MAC,
6207 "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
6208 arvif->vdev_id, sta->addr, sta);
6209
6210 if (sta->tdls) {
6211 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
6212 sta,
6213 WMI_TDLS_PEER_STATE_TEARDOWN);
6214 if (ret)
6215 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
6216 sta->addr,
6217 WMI_TDLS_PEER_STATE_TEARDOWN, ret);
6218 }
6219
6220 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6221 if (ret)
6222 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
6223 sta->addr, arvif->vdev_id, ret);
6224
6225 ath10k_mac_dec_num_stations(arvif, sta);
6226
6227 spin_lock_bh(&ar->data_lock);
6228 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
6229 peer = ar->peer_map[i];
6230 if (!peer)
6231 continue;
6232
6233 if (peer->sta == sta) {
6234 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
6235 sta->addr, peer, i, arvif->vdev_id);
6236 peer->sta = NULL;
6237
6238 /* Clean up the peer object as well since we
6239 * must have failed to do this above.
6240 */
6241 list_del(&peer->list);
6242 ar->peer_map[i] = NULL;
6243 kfree(peer);
6244 ar->num_peers--;
6245 }
6246 }
6247 spin_unlock_bh(&ar->data_lock);
6248
6249 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6250 ath10k_mac_txq_unref(ar, sta->txq[i]);
6251
6252 if (!sta->tdls)
6253 goto exit;
6254
6255 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6256 goto exit;
6257
6258 /* This was the last tdls peer in current vif */
6259 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6260 WMI_TDLS_DISABLE);
6261 if (ret) {
6262 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6263 arvif->vdev_id, ret);
6264 }
6265 } else if (old_state == IEEE80211_STA_AUTH &&
6266 new_state == IEEE80211_STA_ASSOC &&
6267 (vif->type == NL80211_IFTYPE_AP ||
6268 vif->type == NL80211_IFTYPE_MESH_POINT ||
6269 vif->type == NL80211_IFTYPE_ADHOC)) {
6270 /*
6271 * New association.
6272 */
6273 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6274 sta->addr);
6275
6276 ret = ath10k_station_assoc(ar, vif, sta, false);
6277 if (ret)
6278 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6279 sta->addr, arvif->vdev_id, ret);
6280 } else if (old_state == IEEE80211_STA_ASSOC &&
6281 new_state == IEEE80211_STA_AUTHORIZED &&
6282 sta->tdls) {
6283 /*
6284 * Tdls station authorized.
6285 */
6286 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6287 sta->addr);
6288
6289 ret = ath10k_station_assoc(ar, vif, sta, false);
6290 if (ret) {
6291 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6292 sta->addr, arvif->vdev_id, ret);
6293 goto exit;
6294 }
6295
6296 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6297 WMI_TDLS_PEER_STATE_CONNECTED);
6298 if (ret)
6299 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6300 sta->addr, arvif->vdev_id, ret);
6301 } else if (old_state == IEEE80211_STA_ASSOC &&
6302 new_state == IEEE80211_STA_AUTH &&
6303 (vif->type == NL80211_IFTYPE_AP ||
6304 vif->type == NL80211_IFTYPE_MESH_POINT ||
6305 vif->type == NL80211_IFTYPE_ADHOC)) {
6306 /*
6307 * Disassociation.
6308 */
6309 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6310 sta->addr);
6311
6312 ret = ath10k_station_disassoc(ar, vif, sta);
6313 if (ret)
6314 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6315 sta->addr, arvif->vdev_id, ret);
6316 }
6317 exit:
6318 mutex_unlock(&ar->conf_mutex);
6319 return ret;
6320 }
6321
6322 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6323 u16 ac, bool enable)
6324 {
6325 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6326 struct wmi_sta_uapsd_auto_trig_arg arg = {};
6327 u32 prio = 0, acc = 0;
6328 u32 value = 0;
6329 int ret = 0;
6330
6331 lockdep_assert_held(&ar->conf_mutex);
6332
6333 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6334 return 0;
6335
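/* Map the mac80211 access category to the corresponding WMI U-APSD
 * delivery/trigger bits, the WMM AC index (acc) and the user priority
 * (prio) used for the auto-trigger arguments below.
 */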
6336 switch (ac) {
6337 case IEEE80211_AC_VO:
6338 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6339 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6340 prio = 7;
6341 acc = 3;
6342 break;
6343 case IEEE80211_AC_VI:
6344 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6345 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6346 prio = 5;
6347 acc = 2;
6348 break;
6349 case IEEE80211_AC_BE:
6350 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6351 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6352 prio = 2;
6353 acc = 1;
6354 break;
6355 case IEEE80211_AC_BK:
6356 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6357 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6358 prio = 0;
6359 acc = 0;
6360 break;
6361 }
6362
6363 if (enable)
6364 arvif->u.sta.uapsd |= value;
6365 else
6366 arvif->u.sta.uapsd &= ~value;
6367
6368 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6369 WMI_STA_PS_PARAM_UAPSD,
6370 arvif->u.sta.uapsd);
6371 if (ret) {
6372 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6373 goto exit;
6374 }
6375
6376 if (arvif->u.sta.uapsd)
6377 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6378 else
6379 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6380
6381 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6382 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6383 value);
6384 if (ret)
6385 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6386
6387 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6388 if (ret) {
6389 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6390 arvif->vdev_id, ret);
6391 return ret;
6392 }
6393
6394 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6395 if (ret) {
6396 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6397 arvif->vdev_id, ret);
6398 return ret;
6399 }
6400
6401 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6402 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6403 /* Only userspace can make an educated decision when to send
6404 * trigger frame. The following effectively disables u-UAPSD
6405 * autotrigger in firmware (which is enabled by default
6406 * provided the autotrigger service is available).
6407 */
6408
6409 arg.wmm_ac = acc;
6410 arg.user_priority = prio;
6411 arg.service_interval = 0;
6412 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6413 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6414
6415 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6416 arvif->bssid, &arg, 1);
6417 if (ret) {
6418 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6419 ret);
6420 return ret;
6421 }
6422 }
6423
6424 exit:
6425 return ret;
6426 }
6427
6428 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6429 struct ieee80211_vif *vif, u16 ac,
6430 const struct ieee80211_tx_queue_params *params)
6431 {
6432 struct ath10k *ar = hw->priv;
6433 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6434 struct wmi_wmm_params_arg *p = NULL;
6435 int ret;
6436
6437 mutex_lock(&ar->conf_mutex);
6438
6439 switch (ac) {
6440 case IEEE80211_AC_VO:
6441 p = &arvif->wmm_params.ac_vo;
6442 break;
6443 case IEEE80211_AC_VI:
6444 p = &arvif->wmm_params.ac_vi;
6445 break;
6446 case IEEE80211_AC_BE:
6447 p = &arvif->wmm_params.ac_be;
6448 break;
6449 case IEEE80211_AC_BK:
6450 p = &arvif->wmm_params.ac_bk;
6451 break;
6452 }
6453
6454 if (WARN_ON(!p)) {
6455 ret = -EINVAL;
6456 goto exit;
6457 }
6458
6459 p->cwmin = params->cw_min;
6460 p->cwmax = params->cw_max;
6461 p->aifs = params->aifs;
6462
6463 /*
6464 * The channel time duration programmed in the HW is in absolute
6465 * microseconds, while mac80211 gives the txop in units of
6466 * 32 microseconds.
6467 */
6468 p->txop = params->txop * 32;
6469
6470 if (ar->wmi.ops->gen_vdev_wmm_conf) {
6471 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6472 &arvif->wmm_params);
6473 if (ret) {
6474 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6475 arvif->vdev_id, ret);
6476 goto exit;
6477 }
6478 } else {
6479 /* This won't work well with multi-interface cases but it's
6480 * better than nothing.
6481 */
6482 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6483 if (ret) {
6484 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6485 goto exit;
6486 }
6487 }
6488
6489 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6490 if (ret)
6491 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6492
6493 exit:
6494 mutex_unlock(&ar->conf_mutex);
6495 return ret;
6496 }
6497
6498 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6499
6500 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6501 struct ieee80211_vif *vif,
6502 struct ieee80211_channel *chan,
6503 int duration,
6504 enum ieee80211_roc_type type)
6505 {
6506 struct ath10k *ar = hw->priv;
6507 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6508 struct wmi_start_scan_arg arg;
6509 int ret = 0;
6510 u32 scan_time_msec;
6511
6512 mutex_lock(&ar->conf_mutex);
6513
6514 spin_lock_bh(&ar->data_lock);
6515 switch (ar->scan.state) {
6516 case ATH10K_SCAN_IDLE:
6517 reinit_completion(&ar->scan.started);
6518 reinit_completion(&ar->scan.completed);
6519 reinit_completion(&ar->scan.on_channel);
6520 ar->scan.state = ATH10K_SCAN_STARTING;
6521 ar->scan.is_roc = true;
6522 ar->scan.vdev_id = arvif->vdev_id;
6523 ar->scan.roc_freq = chan->center_freq;
6524 ar->scan.roc_notify = true;
6525 ret = 0;
6526 break;
6527 case ATH10K_SCAN_STARTING:
6528 case ATH10K_SCAN_RUNNING:
6529 case ATH10K_SCAN_ABORTING:
6530 ret = -EBUSY;
6531 break;
6532 }
6533 spin_unlock_bh(&ar->data_lock);
6534
6535 if (ret)
6536 goto exit;
6537
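/* Ask firmware to dwell for up to twice the advertised maximum
 * remain-on-channel duration; the actual stay is bounded by the scan
 * timeout work scheduled below for the requested duration.
 */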
6538 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6539
6540 memset(&arg, 0, sizeof(arg));
6541 ath10k_wmi_start_scan_init(ar, &arg);
6542 arg.vdev_id = arvif->vdev_id;
6543 arg.scan_id = ATH10K_SCAN_ID;
6544 arg.n_channels = 1;
6545 arg.channels[0] = chan->center_freq;
6546 arg.dwell_time_active = scan_time_msec;
6547 arg.dwell_time_passive = scan_time_msec;
6548 arg.max_scan_time = scan_time_msec;
6549 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6550 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6551 arg.burst_duration_ms = duration;
6552
6553 ret = ath10k_start_scan(ar, &arg);
6554 if (ret) {
6555 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6556 spin_lock_bh(&ar->data_lock);
6557 ar->scan.state = ATH10K_SCAN_IDLE;
6558 spin_unlock_bh(&ar->data_lock);
6559 goto exit;
6560 }
6561
6562 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6563 if (ret == 0) {
6564 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6565
6566 ret = ath10k_scan_stop(ar);
6567 if (ret)
6568 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6569
6570 ret = -ETIMEDOUT;
6571 goto exit;
6572 }
6573
6574 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6575 msecs_to_jiffies(duration));
6576
6577 ret = 0;
6578 exit:
6579 mutex_unlock(&ar->conf_mutex);
6580 return ret;
6581 }
6582
6583 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6584 {
6585 struct ath10k *ar = hw->priv;
6586
6587 mutex_lock(&ar->conf_mutex);
6588
6589 spin_lock_bh(&ar->data_lock);
6590 ar->scan.roc_notify = false;
6591 spin_unlock_bh(&ar->data_lock);
6592
6593 ath10k_scan_abort(ar);
6594
6595 mutex_unlock(&ar->conf_mutex);
6596
6597 cancel_delayed_work_sync(&ar->scan.timeout);
6598
6599 return 0;
6600 }
6601
6602 /*
6603 * Both RTS and Fragmentation threshold are interface-specific
6604 * in ath10k, but device-specific in mac80211.
6605 */
6606
6607 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6608 {
6609 struct ath10k *ar = hw->priv;
6610 struct ath10k_vif *arvif;
6611 int ret = 0;
6612
6613 mutex_lock(&ar->conf_mutex);
6614 list_for_each_entry(arvif, &ar->arvifs, list) {
6615 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6616 arvif->vdev_id, value);
6617
6618 ret = ath10k_mac_set_rts(arvif, value);
6619 if (ret) {
6620 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6621 arvif->vdev_id, ret);
6622 break;
6623 }
6624 }
6625 mutex_unlock(&ar->conf_mutex);
6626
6627 return ret;
6628 }
6629
6630 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6631 {
6632 /* Even though there's a WMI enum for the fragmentation threshold, no known
6633 * firmware actually implements it. Moreover, it is not possible to leave
6634 * frame fragmentation to mac80211 because firmware clears the "more
6635 * fragments" bit in frame control, making it impossible for remote
6636 * devices to reassemble frames.
6637 *
6638 * Hence implement a dummy callback just to say fragmentation isn't
6639 * supported. This effectively prevents mac80211 from doing frame
6640 * fragmentation in software.
6641 */
6642 return -EOPNOTSUPP;
6643 }
6644
6645 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6646 u32 queues, bool drop)
6647 {
6648 struct ath10k *ar = hw->priv;
6649 bool skip;
6650 long time_left;
6651
6652 /* mac80211 doesn't care whether we really transmit queued frames or not;
6653 * we'll collect those frames either way if we stop/delete vdevs.
6654 */
6655 if (drop)
6656 return;
6657
6658 mutex_lock(&ar->conf_mutex);
6659
6660 if (ar->state == ATH10K_STATE_WEDGED)
6661 goto skip;
6662
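/* Wait until all pending HTT tx completions have drained, bailing out
 * early if the device wedges or a crash flush is requested while
 * waiting.
 */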
6663 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6664 bool empty;
6665
6666 spin_lock_bh(&ar->htt.tx_lock);
6667 empty = (ar->htt.num_pending_tx == 0);
6668 spin_unlock_bh(&ar->htt.tx_lock);
6669
6670 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6671 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6672 &ar->dev_flags);
6673
6674 (empty || skip);
6675 }), ATH10K_FLUSH_TIMEOUT_HZ);
6676
6677 if (time_left == 0 || skip)
6678 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6679 skip, ar->state, time_left);
6680
6681 skip:
6682 mutex_unlock(&ar->conf_mutex);
6683 }
6684
6685 /* TODO: Implement this function properly
6686 * For now it is needed to reply to Probe Requests in IBSS mode.
6687 * Probably we need this information from FW.
6688 */
6689 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6690 {
6691 return 1;
6692 }
6693
6694 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6695 enum ieee80211_reconfig_type reconfig_type)
6696 {
6697 struct ath10k *ar = hw->priv;
6698
6699 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6700 return;
6701
6702 mutex_lock(&ar->conf_mutex);
6703
6704 /* If device failed to restart it will be in a different state, e.g.
6705 * ATH10K_STATE_WEDGED
6706 */
6707 if (ar->state == ATH10K_STATE_RESTARTED) {
6708 ath10k_info(ar, "device successfully recovered\n");
6709 ar->state = ATH10K_STATE_ON;
6710 ieee80211_wake_queues(ar->hw);
6711 }
6712
6713 mutex_unlock(&ar->conf_mutex);
6714 }
6715
6716 static void
6717 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6718 struct ieee80211_channel *channel)
6719 {
6720 int ret;
6721 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6722
6723 lockdep_assert_held(&ar->conf_mutex);
6724
6725 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6726 (ar->rx_channel != channel))
6727 return;
6728
6729 if (ar->scan.state != ATH10K_SCAN_IDLE) {
6730 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6731 return;
6732 }
6733
6734 reinit_completion(&ar->bss_survey_done);
6735
6736 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6737 if (ret) {
6738 ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6739 return;
6740 }
6741
6742 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6743 if (!ret) {
6744 ath10k_warn(ar, "bss channel survey timed out\n");
6745 return;
6746 }
6747 }
6748
6749 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6750 struct survey_info *survey)
6751 {
6752 struct ath10k *ar = hw->priv;
6753 struct ieee80211_supported_band *sband;
6754 struct survey_info *ar_survey = &ar->survey[idx];
6755 int ret = 0;
6756
6757 mutex_lock(&ar->conf_mutex);
6758
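/* The survey index runs over the 2 GHz channels first and then the
 * 5 GHz channels, so translate idx into a per-band channel index.
 */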
6759 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6760 if (sband && idx >= sband->n_channels) {
6761 idx -= sband->n_channels;
6762 sband = NULL;
6763 }
6764
6765 if (!sband)
6766 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6767
6768 if (!sband || idx >= sband->n_channels) {
6769 ret = -ENOENT;
6770 goto exit;
6771 }
6772
6773 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
6774
6775 spin_lock_bh(&ar->data_lock);
6776 memcpy(survey, ar_survey, sizeof(*survey));
6777 spin_unlock_bh(&ar->data_lock);
6778
6779 survey->channel = &sband->channels[idx];
6780
6781 if (ar->rx_channel == survey->channel)
6782 survey->filled |= SURVEY_INFO_IN_USE;
6783
6784 exit:
6785 mutex_unlock(&ar->conf_mutex);
6786 return ret;
6787 }
6788
6789 static bool
6790 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6791 enum nl80211_band band,
6792 const struct cfg80211_bitrate_mask *mask)
6793 {
6794 int num_rates = 0;
6795 int i;
6796
6797 num_rates += hweight32(mask->control[band].legacy);
6798
6799 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6800 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6801
6802 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6803 num_rates += hweight16(mask->control[band].vht_mcs[i]);
6804
6805 return num_rates == 1;
6806 }
6807
6808 static bool
6809 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6810 enum nl80211_band band,
6811 const struct cfg80211_bitrate_mask *mask,
6812 int *nss)
6813 {
6814 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6815 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6816 u8 ht_nss_mask = 0;
6817 u8 vht_nss_mask = 0;
6818 int i;
6819
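/* A pure NSS restriction is only recognized when every enabled spatial
 * stream selects its full supported HT and VHT MCS set, the HT and VHT
 * stream sets agree, and the enabled streams form a contiguous range
 * starting at NSS 1.
 */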
6820 if (mask->control[band].legacy)
6821 return false;
6822
6823 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6824 if (mask->control[band].ht_mcs[i] == 0)
6825 continue;
6826 else if (mask->control[band].ht_mcs[i] ==
6827 sband->ht_cap.mcs.rx_mask[i])
6828 ht_nss_mask |= BIT(i);
6829 else
6830 return false;
6831 }
6832
6833 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6834 if (mask->control[band].vht_mcs[i] == 0)
6835 continue;
6836 else if (mask->control[band].vht_mcs[i] ==
6837 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6838 vht_nss_mask |= BIT(i);
6839 else
6840 return false;
6841 }
6842
6843 if (ht_nss_mask != vht_nss_mask)
6844 return false;
6845
6846 if (ht_nss_mask == 0)
6847 return false;
6848
6849 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6850 return false;
6851
6852 *nss = fls(ht_nss_mask);
6853
6854 return true;
6855 }
6856
6857 static int
6858 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6859 enum nl80211_band band,
6860 const struct cfg80211_bitrate_mask *mask,
6861 u8 *rate, u8 *nss)
6862 {
6863 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6864 int rate_idx;
6865 int i;
6866 u16 bitrate;
6867 u8 preamble;
6868 u8 hw_rate;
6869
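/* Build the WMI rate code in each branch below: bits [7:6] carry the
 * preamble type, bits [5:4] carry (NSS - 1) and bits [3:0] carry the
 * hardware rate or MCS index.
 */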
6870 if (hweight32(mask->control[band].legacy) == 1) {
6871 rate_idx = ffs(mask->control[band].legacy) - 1;
6872
6873 hw_rate = sband->bitrates[rate_idx].hw_value;
6874 bitrate = sband->bitrates[rate_idx].bitrate;
6875
6876 if (ath10k_mac_bitrate_is_cck(bitrate))
6877 preamble = WMI_RATE_PREAMBLE_CCK;
6878 else
6879 preamble = WMI_RATE_PREAMBLE_OFDM;
6880
6881 *nss = 1;
6882 *rate = preamble << 6 |
6883 (*nss - 1) << 4 |
6884 hw_rate << 0;
6885
6886 return 0;
6887 }
6888
6889 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6890 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6891 *nss = i + 1;
6892 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6893 (*nss - 1) << 4 |
6894 (ffs(mask->control[band].ht_mcs[i]) - 1);
6895
6896 return 0;
6897 }
6898 }
6899
6900 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6901 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6902 *nss = i + 1;
6903 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6904 (*nss - 1) << 4 |
6905 (ffs(mask->control[band].vht_mcs[i]) - 1);
6906
6907 return 0;
6908 }
6909 }
6910
6911 return -EINVAL;
6912 }
6913
6914 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6915 u8 rate, u8 nss, u8 sgi, u8 ldpc)
6916 {
6917 struct ath10k *ar = arvif->ar;
6918 u32 vdev_param;
6919 int ret;
6920
6921 lockdep_assert_held(&ar->conf_mutex);
6922
6923 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6924 arvif->vdev_id, rate, nss, sgi);
6925
6926 vdev_param = ar->wmi.vdev_param->fixed_rate;
6927 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6928 if (ret) {
6929 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6930 rate, ret);
6931 return ret;
6932 }
6933
6934 vdev_param = ar->wmi.vdev_param->nss;
6935 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6936 if (ret) {
6937 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6938 return ret;
6939 }
6940
6941 vdev_param = ar->wmi.vdev_param->sgi;
6942 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6943 if (ret) {
6944 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6945 return ret;
6946 }
6947
6948 vdev_param = ar->wmi.vdev_param->ldpc;
6949 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6950 if (ret) {
6951 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6952 return ret;
6953 }
6954
6955 return 0;
6956 }
6957
6958 static bool
6959 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6960 enum nl80211_band band,
6961 const struct cfg80211_bitrate_mask *mask)
6962 {
6963 int i;
6964 u16 vht_mcs;
6965
6966 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6967 * to express all VHT MCS rate masks. Effectively only the following
6968 * ranges can be used: none, 0-7, 0-8 and 0-9.
6969 */
6970 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6971 vht_mcs = mask->control[band].vht_mcs[i];
6972
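/* 0xff, 0x1ff and 0x3ff select MCS 0-7, 0-8 and 0-9 respectively; any
 * other VHT MCS subset cannot be expressed towards the firmware.
 */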
6973 switch (vht_mcs) {
6974 case 0:
6975 case BIT(8) - 1:
6976 case BIT(9) - 1:
6977 case BIT(10) - 1:
6978 break;
6979 default:
6980 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6981 return false;
6982 }
6983 }
6984
6985 return true;
6986 }
6987
6988 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6989 struct ieee80211_sta *sta)
6990 {
6991 struct ath10k_vif *arvif = data;
6992 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6993 struct ath10k *ar = arvif->ar;
6994
6995 if (arsta->arvif != arvif)
6996 return;
6997
6998 spin_lock_bh(&ar->data_lock);
6999 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
7000 spin_unlock_bh(&ar->data_lock);
7001
7002 ieee80211_queue_work(ar->hw, &arsta->update_wk);
7003 }
7004
7005 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
7006 struct ieee80211_vif *vif,
7007 const struct cfg80211_bitrate_mask *mask)
7008 {
7009 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7010 struct cfg80211_chan_def def;
7011 struct ath10k *ar = arvif->ar;
7012 enum nl80211_band band;
7013 const u8 *ht_mcs_mask;
7014 const u16 *vht_mcs_mask;
7015 u8 rate;
7016 u8 nss;
7017 u8 sgi;
7018 u8 ldpc;
7019 int single_nss;
7020 int ret;
7021
7022 if (ath10k_mac_vif_chan(vif, &def))
7023 return -EPERM;
7024
7025 band = def.chan->band;
7026 ht_mcs_mask = mask->control[band].ht_mcs;
7027 vht_mcs_mask = mask->control[band].vht_mcs;
7028 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
7029
7030 sgi = mask->control[band].gi;
7031 if (sgi == NL80211_TXRATE_FORCE_LGI)
7032 return -EINVAL;
7033
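/* Three cases follow: a mask pinning exactly one rate is programmed as
 * a fixed rate, a mask that only limits the number of spatial streams
 * is programmed as a fixed NSS, and any other mask is kept per-vif and
 * re-applied to stations via the iterator below.
 */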
7034 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
7035 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
7036 &rate, &nss);
7037 if (ret) {
7038 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
7039 arvif->vdev_id, ret);
7040 return ret;
7041 }
7042 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
7043 &single_nss)) {
7044 rate = WMI_FIXED_RATE_NONE;
7045 nss = single_nss;
7046 } else {
7047 rate = WMI_FIXED_RATE_NONE;
7048 nss = min(ar->num_rf_chains,
7049 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
7050 ath10k_mac_max_vht_nss(vht_mcs_mask)));
7051
7052 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
7053 return -EINVAL;
7054
7055 mutex_lock(&ar->conf_mutex);
7056
7057 arvif->bitrate_mask = *mask;
7058 ieee80211_iterate_stations_atomic(ar->hw,
7059 ath10k_mac_set_bitrate_mask_iter,
7060 arvif);
7061
7062 mutex_unlock(&ar->conf_mutex);
7063 }
7064
7065 mutex_lock(&ar->conf_mutex);
7066
7067 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
7068 if (ret) {
7069 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
7070 arvif->vdev_id, ret);
7071 goto exit;
7072 }
7073
7074 exit:
7075 mutex_unlock(&ar->conf_mutex);
7076
7077 return ret;
7078 }
7079
7080 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
7081 struct ieee80211_vif *vif,
7082 struct ieee80211_sta *sta,
7083 u32 changed)
7084 {
7085 struct ath10k *ar = hw->priv;
7086 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7087 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7088 struct ath10k_peer *peer;
7089 u32 bw, smps;
7090
7091 spin_lock_bh(&ar->data_lock);
7092
7093 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
7094 if (!peer) {
7095 spin_unlock_bh(&ar->data_lock);
7096 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
7097 sta->addr, arvif->vdev_id);
7098 return;
7099 }
7100
7101 ath10k_dbg(ar, ATH10K_DBG_MAC,
7102 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
7103 sta->addr, changed, sta->bandwidth, sta->rx_nss,
7104 sta->smps_mode);
7105
7106 if (changed & IEEE80211_RC_BW_CHANGED) {
7107 bw = WMI_PEER_CHWIDTH_20MHZ;
7108
7109 switch (sta->bandwidth) {
7110 case IEEE80211_STA_RX_BW_20:
7111 bw = WMI_PEER_CHWIDTH_20MHZ;
7112 break;
7113 case IEEE80211_STA_RX_BW_40:
7114 bw = WMI_PEER_CHWIDTH_40MHZ;
7115 break;
7116 case IEEE80211_STA_RX_BW_80:
7117 bw = WMI_PEER_CHWIDTH_80MHZ;
7118 break;
7119 case IEEE80211_STA_RX_BW_160:
7120 bw = WMI_PEER_CHWIDTH_160MHZ;
7121 break;
7122 default:
7123 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
7124 sta->bandwidth, sta->addr);
7125 bw = WMI_PEER_CHWIDTH_20MHZ;
7126 break;
7127 }
7128
7129 arsta->bw = bw;
7130 }
7131
7132 if (changed & IEEE80211_RC_NSS_CHANGED)
7133 arsta->nss = sta->rx_nss;
7134
7135 if (changed & IEEE80211_RC_SMPS_CHANGED) {
7136 smps = WMI_PEER_SMPS_PS_NONE;
7137
7138 switch (sta->smps_mode) {
7139 case IEEE80211_SMPS_AUTOMATIC:
7140 case IEEE80211_SMPS_OFF:
7141 smps = WMI_PEER_SMPS_PS_NONE;
7142 break;
7143 case IEEE80211_SMPS_STATIC:
7144 smps = WMI_PEER_SMPS_STATIC;
7145 break;
7146 case IEEE80211_SMPS_DYNAMIC:
7147 smps = WMI_PEER_SMPS_DYNAMIC;
7148 break;
7149 case IEEE80211_SMPS_NUM_MODES:
7150 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
7151 sta->smps_mode, sta->addr);
7152 smps = WMI_PEER_SMPS_PS_NONE;
7153 break;
7154 }
7155
7156 arsta->smps = smps;
7157 }
7158
7159 arsta->changed |= changed;
7160
7161 spin_unlock_bh(&ar->data_lock);
7162
7163 ieee80211_queue_work(hw, &arsta->update_wk);
7164 }
7165
7166 static void ath10k_offset_tsf(struct ieee80211_hw *hw,
7167 struct ieee80211_vif *vif, s64 tsf_offset)
7168 {
7169 struct ath10k *ar = hw->priv;
7170 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7171 u32 offset, vdev_param;
7172 int ret;
7173
7174 if (tsf_offset < 0) {
7175 vdev_param = ar->wmi.vdev_param->dec_tsf;
7176 offset = -tsf_offset;
7177 } else {
7178 vdev_param = ar->wmi.vdev_param->inc_tsf;
7179 offset = tsf_offset;
7180 }
7181
7182 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
7183 vdev_param, offset);
7184
7185 if (ret && ret != -EOPNOTSUPP)
7186 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
7187 offset, vdev_param, ret);
7188 }
7189
7190 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
7191 struct ieee80211_vif *vif,
7192 struct ieee80211_ampdu_params *params)
7193 {
7194 struct ath10k *ar = hw->priv;
7195 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7196 struct ieee80211_sta *sta = params->sta;
7197 enum ieee80211_ampdu_mlme_action action = params->action;
7198 u16 tid = params->tid;
7199
7200 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
7201 arvif->vdev_id, sta->addr, tid, action);
7202
7203 switch (action) {
7204 case IEEE80211_AMPDU_RX_START:
7205 case IEEE80211_AMPDU_RX_STOP:
7206 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
7207 * creation/removal. Do we need to verify this?
7208 */
7209 return 0;
7210 case IEEE80211_AMPDU_TX_START:
7211 case IEEE80211_AMPDU_TX_STOP_CONT:
7212 case IEEE80211_AMPDU_TX_STOP_FLUSH:
7213 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7214 case IEEE80211_AMPDU_TX_OPERATIONAL:
7215 /* Firmware offloads Tx aggregation entirely so deny mac80211
7216 * Tx aggregation requests.
7217 */
7218 return -EOPNOTSUPP;
7219 }
7220
7221 return -EINVAL;
7222 }
7223
7224 static void
7225 ath10k_mac_update_rx_channel(struct ath10k *ar,
7226 struct ieee80211_chanctx_conf *ctx,
7227 struct ieee80211_vif_chanctx_switch *vifs,
7228 int n_vifs)
7229 {
7230 struct cfg80211_chan_def *def = NULL;
7231
7232 /* Both locks are required because ar->rx_channel is modified. This
7233 * allows readers to hold either lock.
7234 */
7235 lockdep_assert_held(&ar->conf_mutex);
7236 lockdep_assert_held(&ar->data_lock);
7237
7238 WARN_ON(ctx && vifs);
7239 WARN_ON(vifs && !n_vifs);
7240
7241 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7242 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7243 * ppdu on Rx may reduce performance on low-end systems. It should be
7244 * possible to build tables/hashmaps to speed up the lookup (be wary of
7245 * CPU data cache line sizes though), but to keep the initial
7246 * implementation simple and less intrusive, fall back to the slow lookup
7247 * only for multi-channel cases. Single-channel cases will continue to
7248 * use the old channel derivation and thus performance should not be
7249 * affected much.
7250 */
7251 rcu_read_lock();
7252 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7253 ieee80211_iter_chan_contexts_atomic(ar->hw,
7254 ath10k_mac_get_any_chandef_iter,
7255 &def);
7256
7257 if (vifs)
7258 def = &vifs[0].new_ctx->def;
7259
7260 ar->rx_channel = def->chan;
7261 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7262 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7263 /* During a driver restart due to a firmware assert, mac80211
7264 * already has a valid channel context for the given radio, so the
7265 * channel context iteration returns num_chanctx > 0. Fix rx_channel
7266 * when a restart is in progress.
7267 */
7268 ar->rx_channel = ctx->def.chan;
7269 } else {
7270 ar->rx_channel = NULL;
7271 }
7272 rcu_read_unlock();
7273 }
7274
7275 static void
7276 ath10k_mac_update_vif_chan(struct ath10k *ar,
7277 struct ieee80211_vif_chanctx_switch *vifs,
7278 int n_vifs)
7279 {
7280 struct ath10k_vif *arvif;
7281 int ret;
7282 int i;
7283
7284 lockdep_assert_held(&ar->conf_mutex);
7285
7286 /* First stop monitor interface. Some FW versions crash if there's a
7287 * lone monitor interface.
7288 */
7289 if (ar->monitor_started)
7290 ath10k_monitor_stop(ar);
7291
7292 for (i = 0; i < n_vifs; i++) {
7293 arvif = (void *)vifs[i].vif->drv_priv;
7294
7295 ath10k_dbg(ar, ATH10K_DBG_MAC,
7296 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7297 arvif->vdev_id,
7298 vifs[i].old_ctx->def.chan->center_freq,
7299 vifs[i].new_ctx->def.chan->center_freq,
7300 vifs[i].old_ctx->def.width,
7301 vifs[i].new_ctx->def.width);
7302
7303 if (WARN_ON(!arvif->is_started))
7304 continue;
7305
7306 if (WARN_ON(!arvif->is_up))
7307 continue;
7308
7309 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7310 if (ret) {
7311 ath10k_warn(ar, "failed to down vdev %d: %d\n",
7312 arvif->vdev_id, ret);
7313 continue;
7314 }
7315 }
7316
7317 /* All relevant vdevs are downed and associated channel resources
7318 * should be available for the channel switch now.
7319 */
7320
7321 spin_lock_bh(&ar->data_lock);
7322 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7323 spin_unlock_bh(&ar->data_lock);
7324
7325 for (i = 0; i < n_vifs; i++) {
7326 arvif = (void *)vifs[i].vif->drv_priv;
7327
7328 if (WARN_ON(!arvif->is_started))
7329 continue;
7330
7331 if (WARN_ON(!arvif->is_up))
7332 continue;
7333
7334 ret = ath10k_mac_setup_bcn_tmpl(arvif);
7335 if (ret)
7336 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7337 ret);
7338
7339 ret = ath10k_mac_setup_prb_tmpl(arvif);
7340 if (ret)
7341 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7342 ret);
7343
7344 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7345 if (ret) {
7346 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7347 arvif->vdev_id, ret);
7348 continue;
7349 }
7350
7351 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7352 arvif->bssid);
7353 if (ret) {
7354 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7355 arvif->vdev_id, ret);
7356 continue;
7357 }
7358 }
7359
7360 ath10k_monitor_recalc(ar);
7361 }
7362
7363 static int
7364 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7365 struct ieee80211_chanctx_conf *ctx)
7366 {
7367 struct ath10k *ar = hw->priv;
7368
7369 ath10k_dbg(ar, ATH10K_DBG_MAC,
7370 "mac chanctx add freq %hu width %d ptr %pK\n",
7371 ctx->def.chan->center_freq, ctx->def.width, ctx);
7372
7373 mutex_lock(&ar->conf_mutex);
7374
7375 spin_lock_bh(&ar->data_lock);
7376 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7377 spin_unlock_bh(&ar->data_lock);
7378
7379 ath10k_recalc_radar_detection(ar);
7380 ath10k_monitor_recalc(ar);
7381
7382 mutex_unlock(&ar->conf_mutex);
7383
7384 return 0;
7385 }
7386
7387 static void
7388 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7389 struct ieee80211_chanctx_conf *ctx)
7390 {
7391 struct ath10k *ar = hw->priv;
7392
7393 ath10k_dbg(ar, ATH10K_DBG_MAC,
7394 "mac chanctx remove freq %hu width %d ptr %pK\n",
7395 ctx->def.chan->center_freq, ctx->def.width, ctx);
7396
7397 mutex_lock(&ar->conf_mutex);
7398
7399 spin_lock_bh(&ar->data_lock);
7400 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7401 spin_unlock_bh(&ar->data_lock);
7402
7403 ath10k_recalc_radar_detection(ar);
7404 ath10k_monitor_recalc(ar);
7405
7406 mutex_unlock(&ar->conf_mutex);
7407 }
7408
7409 struct ath10k_mac_change_chanctx_arg {
7410 struct ieee80211_chanctx_conf *ctx;
7411 struct ieee80211_vif_chanctx_switch *vifs;
7412 int n_vifs;
7413 int next_vif;
7414 };
7415
7416 static void
7417 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7418 struct ieee80211_vif *vif)
7419 {
7420 struct ath10k_mac_change_chanctx_arg *arg = data;
7421
7422 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7423 return;
7424
7425 arg->n_vifs++;
7426 }
7427
7428 static void
7429 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7430 struct ieee80211_vif *vif)
7431 {
7432 struct ath10k_mac_change_chanctx_arg *arg = data;
7433 struct ieee80211_chanctx_conf *ctx;
7434
7435 ctx = rcu_access_pointer(vif->chanctx_conf);
7436 if (ctx != arg->ctx)
7437 return;
7438
7439 if (WARN_ON(arg->next_vif == arg->n_vifs))
7440 return;
7441
7442 arg->vifs[arg->next_vif].vif = vif;
7443 arg->vifs[arg->next_vif].old_ctx = ctx;
7444 arg->vifs[arg->next_vif].new_ctx = ctx;
7445 arg->next_vif++;
7446 }
7447
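/* Only bandwidth changes are handled here; actual channel changes are
 * expected to arrive via switch_vif_chanctx() (hence the WARN_ON below).
 * If the vif array allocation fails the width update is simply skipped.
 */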
7448 static void
7449 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7450 struct ieee80211_chanctx_conf *ctx,
7451 u32 changed)
7452 {
7453 struct ath10k *ar = hw->priv;
7454 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7455
7456 mutex_lock(&ar->conf_mutex);
7457
7458 ath10k_dbg(ar, ATH10K_DBG_MAC,
7459 "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
7460 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7461
7462 /* This shouldn't really happen because channel switching should use
7463 * switch_vif_chanctx().
7464 */
7465 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7466 goto unlock;
7467
7468 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7469 ieee80211_iterate_active_interfaces_atomic(
7470 hw,
7471 IEEE80211_IFACE_ITER_NORMAL,
7472 ath10k_mac_change_chanctx_cnt_iter,
7473 &arg);
7474 if (arg.n_vifs == 0)
7475 goto radar;
7476
7477 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7478 GFP_KERNEL);
7479 if (!arg.vifs)
7480 goto radar;
7481
7482 ieee80211_iterate_active_interfaces_atomic(
7483 hw,
7484 IEEE80211_IFACE_ITER_NORMAL,
7485 ath10k_mac_change_chanctx_fill_iter,
7486 &arg);
7487 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7488 kfree(arg.vifs);
7489 }
7490
7491 radar:
7492 ath10k_recalc_radar_detection(ar);
7493
7494 /* FIXME: How to configure Rx chains properly? */
7495
7496 /* No other actions are actually necessary. Firmware maintains channel
7497 * definitions per vdev internally and there's no host-side channel
7498 * context abstraction to configure, e.g. channel width.
7499 */
7500
7501 unlock:
7502 mutex_unlock(&ar->conf_mutex);
7503 }
7504
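/* Bind a vif to its channel context: start the vdev on the context's channel
 * definition, reapply powersave, and bring monitor vdevs up immediately
 * (aid 0, own MAC address) since they have no association step.
 */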
7505 static int
7506 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7507 struct ieee80211_vif *vif,
7508 struct ieee80211_chanctx_conf *ctx)
7509 {
7510 struct ath10k *ar = hw->priv;
7511 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7512 int ret;
7513
7514 mutex_lock(&ar->conf_mutex);
7515
7516 ath10k_dbg(ar, ATH10K_DBG_MAC,
7517 "mac chanctx assign ptr %pK vdev_id %i\n",
7518 ctx, arvif->vdev_id);
7519
7520 if (WARN_ON(arvif->is_started)) {
7521 mutex_unlock(&ar->conf_mutex);
7522 return -EBUSY;
7523 }
7524
7525 ret = ath10k_vdev_start(arvif, &ctx->def);
7526 if (ret) {
7527 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7528 arvif->vdev_id, vif->addr,
7529 ctx->def.chan->center_freq, ret);
7530 goto err;
7531 }
7532
7533 arvif->is_started = true;
7534
7535 ret = ath10k_mac_vif_setup_ps(arvif);
7536 if (ret) {
7537 ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7538 arvif->vdev_id, ret);
7539 goto err_stop;
7540 }
7541
7542 if (vif->type == NL80211_IFTYPE_MONITOR) {
7543 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7544 if (ret) {
7545 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7546 arvif->vdev_id, ret);
7547 goto err_stop;
7548 }
7549
7550 arvif->is_up = true;
7551 }
7552
7553 if (ath10k_mac_can_set_cts_prot(arvif)) {
7554 ret = ath10k_mac_set_cts_prot(arvif);
7555 if (ret)
7556 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
7557 arvif->vdev_id, ret);
7558 }
7559
7560 mutex_unlock(&ar->conf_mutex);
7561 return 0;
7562
7563 err_stop:
7564 ath10k_vdev_stop(arvif);
7565 arvif->is_started = false;
7566 ath10k_mac_vif_setup_ps(arvif);
7567
7568 err:
7569 mutex_unlock(&ar->conf_mutex);
7570 return ret;
7571 }
7572
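/* Reverse of assign_vif_chanctx: down monitor vdevs, then stop the vdev.
 * Errors are only logged and the vif is marked stopped regardless, so driver
 * state stays in sync with mac80211's.
 */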
7573 static void
7574 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7575 struct ieee80211_vif *vif,
7576 struct ieee80211_chanctx_conf *ctx)
7577 {
7578 struct ath10k *ar = hw->priv;
7579 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7580 int ret;
7581
7582 mutex_lock(&ar->conf_mutex);
7583
7584 ath10k_dbg(ar, ATH10K_DBG_MAC,
7585 "mac chanctx unassign ptr %pK vdev_id %i\n",
7586 ctx, arvif->vdev_id);
7587
7588 WARN_ON(!arvif->is_started);
7589
7590 if (vif->type == NL80211_IFTYPE_MONITOR) {
7591 WARN_ON(!arvif->is_up);
7592
7593 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7594 if (ret)
7595 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7596 arvif->vdev_id, ret);
7597
7598 arvif->is_up = false;
7599 }
7600
7601 ret = ath10k_vdev_stop(arvif);
7602 if (ret)
7603 ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7604 arvif->vdev_id, ret);
7605
7606 arvif->is_started = false;
7607
7608 mutex_unlock(&ar->conf_mutex);
7609 }
7610
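/* mac80211 passes the complete set of vifs to move between contexts; the
 * actual work (vdev down, beacon/probe template refresh, restart, vdev up)
 * is done in ath10k_mac_update_vif_chan().
 */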
7611 static int
7612 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7613 struct ieee80211_vif_chanctx_switch *vifs,
7614 int n_vifs,
7615 enum ieee80211_chanctx_switch_mode mode)
7616 {
7617 struct ath10k *ar = hw->priv;
7618
7619 mutex_lock(&ar->conf_mutex);
7620
7621 ath10k_dbg(ar, ATH10K_DBG_MAC,
7622 "mac chanctx switch n_vifs %d mode %d\n",
7623 n_vifs, mode);
7624 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7625
7626 mutex_unlock(&ar->conf_mutex);
7627 return 0;
7628 }
7629
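/* Invoked before mac80211 frees a station under RCU. Flag matching peers as
 * removed so subsequent peer lookups ignore them; the entries themselves are
 * presumably torn down later via the normal peer deletion path.
 */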
7630 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
7631 struct ieee80211_vif *vif,
7632 struct ieee80211_sta *sta)
7633 {
7634 struct ath10k *ar;
7635 struct ath10k_peer *peer;
7636
7637 ar = hw->priv;
7638
7639 list_for_each_entry(peer, &ar->peers, list)
7640 if (peer->sta == sta)
7641 peer->removed = true;
7642 }
7643
7644 static const struct ieee80211_ops ath10k_ops = {
7645 .tx = ath10k_mac_op_tx,
7646 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
7647 .start = ath10k_start,
7648 .stop = ath10k_stop,
7649 .config = ath10k_config,
7650 .add_interface = ath10k_add_interface,
7651 .remove_interface = ath10k_remove_interface,
7652 .configure_filter = ath10k_configure_filter,
7653 .bss_info_changed = ath10k_bss_info_changed,
7654 .set_coverage_class = ath10k_mac_op_set_coverage_class,
7655 .hw_scan = ath10k_hw_scan,
7656 .cancel_hw_scan = ath10k_cancel_hw_scan,
7657 .set_key = ath10k_set_key,
7658 .set_default_unicast_key = ath10k_set_default_unicast_key,
7659 .sta_state = ath10k_sta_state,
7660 .conf_tx = ath10k_conf_tx,
7661 .remain_on_channel = ath10k_remain_on_channel,
7662 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
7663 .set_rts_threshold = ath10k_set_rts_threshold,
7664 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
7665 .flush = ath10k_flush,
7666 .tx_last_beacon = ath10k_tx_last_beacon,
7667 .set_antenna = ath10k_set_antenna,
7668 .get_antenna = ath10k_get_antenna,
7669 .reconfig_complete = ath10k_reconfig_complete,
7670 .get_survey = ath10k_get_survey,
7671 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
7672 .sta_rc_update = ath10k_sta_rc_update,
7673 .offset_tsf = ath10k_offset_tsf,
7674 .ampdu_action = ath10k_ampdu_action,
7675 .get_et_sset_count = ath10k_debug_get_et_sset_count,
7676 .get_et_stats = ath10k_debug_get_et_stats,
7677 .get_et_strings = ath10k_debug_get_et_strings,
7678 .add_chanctx = ath10k_mac_op_add_chanctx,
7679 .remove_chanctx = ath10k_mac_op_remove_chanctx,
7680 .change_chanctx = ath10k_mac_op_change_chanctx,
7681 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
7682 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
7683 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
7684 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
7685
7686 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7687
7688 #ifdef CONFIG_PM
7689 .suspend = ath10k_wow_op_suspend,
7690 .resume = ath10k_wow_op_resume,
7691 .set_wakeup = ath10k_wow_op_set_wakeup,
7692 #endif
7693 #ifdef CONFIG_MAC80211_DEBUGFS
7694 .sta_add_debugfs = ath10k_sta_add_debugfs,
7695 .sta_statistics = ath10k_sta_statistics,
7696 #endif
7697 };
7698
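/* Channel tables below are const templates; ath10k_mac_register() kmemdup()s
 * them into per-device band structs so each wiphy gets its own mutable copy.
 */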
7699 #define CHAN2G(_channel, _freq, _flags) { \
7700 .band = NL80211_BAND_2GHZ, \
7701 .hw_value = (_channel), \
7702 .center_freq = (_freq), \
7703 .flags = (_flags), \
7704 .max_antenna_gain = 0, \
7705 .max_power = 30, \
7706 }
7707
7708 #define CHAN5G(_channel, _freq, _flags) { \
7709 .band = NL80211_BAND_5GHZ, \
7710 .hw_value = (_channel), \
7711 .center_freq = (_freq), \
7712 .flags = (_flags), \
7713 .max_antenna_gain = 0, \
7714 .max_power = 30, \
7715 }
7716
7717 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7718 CHAN2G(1, 2412, 0),
7719 CHAN2G(2, 2417, 0),
7720 CHAN2G(3, 2422, 0),
7721 CHAN2G(4, 2427, 0),
7722 CHAN2G(5, 2432, 0),
7723 CHAN2G(6, 2437, 0),
7724 CHAN2G(7, 2442, 0),
7725 CHAN2G(8, 2447, 0),
7726 CHAN2G(9, 2452, 0),
7727 CHAN2G(10, 2457, 0),
7728 CHAN2G(11, 2462, 0),
7729 CHAN2G(12, 2467, 0),
7730 CHAN2G(13, 2472, 0),
7731 CHAN2G(14, 2484, 0),
7732 };
7733
7734 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7735 CHAN5G(36, 5180, 0),
7736 CHAN5G(40, 5200, 0),
7737 CHAN5G(44, 5220, 0),
7738 CHAN5G(48, 5240, 0),
7739 CHAN5G(52, 5260, 0),
7740 CHAN5G(56, 5280, 0),
7741 CHAN5G(60, 5300, 0),
7742 CHAN5G(64, 5320, 0),
7743 CHAN5G(100, 5500, 0),
7744 CHAN5G(104, 5520, 0),
7745 CHAN5G(108, 5540, 0),
7746 CHAN5G(112, 5560, 0),
7747 CHAN5G(116, 5580, 0),
7748 CHAN5G(120, 5600, 0),
7749 CHAN5G(124, 5620, 0),
7750 CHAN5G(128, 5640, 0),
7751 CHAN5G(132, 5660, 0),
7752 CHAN5G(136, 5680, 0),
7753 CHAN5G(140, 5700, 0),
7754 CHAN5G(144, 5720, 0),
7755 CHAN5G(149, 5745, 0),
7756 CHAN5G(153, 5765, 0),
7757 CHAN5G(157, 5785, 0),
7758 CHAN5G(161, 5805, 0),
7759 CHAN5G(165, 5825, 0),
7760 CHAN5G(169, 5845, 0),
7761 };
7762
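/* Allocate the ieee80211_hw together with the ath10k core and bus-private
 * area. The ops table is duplicated per device because some callbacks (e.g.
 * wake_tx_queue, set_coverage_class) may be NULLed out later depending on
 * firmware and hardware capabilities.
 */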
7763 struct ath10k *ath10k_mac_create(size_t priv_size)
7764 {
7765 struct ieee80211_hw *hw;
7766 struct ieee80211_ops *ops;
7767 struct ath10k *ar;
7768
7769 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
7770 if (!ops)
7771 return NULL;
7772
7773 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
7774 if (!hw) {
7775 kfree(ops);
7776 return NULL;
7777 }
7778
7779 ar = hw->priv;
7780 ar->hw = hw;
7781 ar->ops = ops;
7782
7783 return ar;
7784 }
7785
7786 void ath10k_mac_destroy(struct ath10k *ar)
7787 {
7788 struct ieee80211_ops *ops = ar->ops;
7789
7790 ieee80211_free_hw(ar->hw);
7791 kfree(ops);
7792 }
7793
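/* Interface combination tables advertised to cfg80211. Each limits[] entry
 * caps how many interfaces of the listed types may coexist; the combination
 * itself bounds the total interface count and concurrent channels. Different
 * firmware branches (main, 10.x, TLV, 10.4) get different tables.
 */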
7794 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7795 {
7796 .max = 8,
7797 .types = BIT(NL80211_IFTYPE_STATION)
7798 | BIT(NL80211_IFTYPE_P2P_CLIENT)
7799 },
7800 {
7801 .max = 3,
7802 .types = BIT(NL80211_IFTYPE_P2P_GO)
7803 },
7804 {
7805 .max = 1,
7806 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
7807 },
7808 {
7809 .max = 7,
7810 .types = BIT(NL80211_IFTYPE_AP)
7811 #ifdef CONFIG_MAC80211_MESH
7812 | BIT(NL80211_IFTYPE_MESH_POINT)
7813 #endif
7814 },
7815 };
7816
7817 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7818 {
7819 .max = 8,
7820 .types = BIT(NL80211_IFTYPE_AP)
7821 #ifdef CONFIG_MAC80211_MESH
7822 | BIT(NL80211_IFTYPE_MESH_POINT)
7823 #endif
7824 },
7825 {
7826 .max = 1,
7827 .types = BIT(NL80211_IFTYPE_STATION)
7828 },
7829 };
7830
7831 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7832 {
7833 .limits = ath10k_if_limits,
7834 .n_limits = ARRAY_SIZE(ath10k_if_limits),
7835 .max_interfaces = 8,
7836 .num_different_channels = 1,
7837 .beacon_int_infra_match = true,
7838 },
7839 };
7840
7841 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7842 {
7843 .limits = ath10k_10x_if_limits,
7844 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7845 .max_interfaces = 8,
7846 .num_different_channels = 1,
7847 .beacon_int_infra_match = true,
7848 .beacon_int_min_gcd = 1,
7849 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7850 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7851 BIT(NL80211_CHAN_WIDTH_20) |
7852 BIT(NL80211_CHAN_WIDTH_40) |
7853 BIT(NL80211_CHAN_WIDTH_80),
7854 #endif
7855 },
7856 };
7857
7858 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7859 {
7860 .max = 2,
7861 .types = BIT(NL80211_IFTYPE_STATION),
7862 },
7863 {
7864 .max = 2,
7865 .types = BIT(NL80211_IFTYPE_AP) |
7866 #ifdef CONFIG_MAC80211_MESH
7867 BIT(NL80211_IFTYPE_MESH_POINT) |
7868 #endif
7869 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7870 BIT(NL80211_IFTYPE_P2P_GO),
7871 },
7872 {
7873 .max = 1,
7874 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7875 },
7876 };
7877
7878 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7879 {
7880 .max = 2,
7881 .types = BIT(NL80211_IFTYPE_STATION),
7882 },
7883 {
7884 .max = 2,
7885 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7886 },
7887 {
7888 .max = 1,
7889 .types = BIT(NL80211_IFTYPE_AP) |
7890 #ifdef CONFIG_MAC80211_MESH
7891 BIT(NL80211_IFTYPE_MESH_POINT) |
7892 #endif
7893 BIT(NL80211_IFTYPE_P2P_GO),
7894 },
7895 {
7896 .max = 1,
7897 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7898 },
7899 };
7900
7901 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7902 {
7903 .max = 1,
7904 .types = BIT(NL80211_IFTYPE_STATION),
7905 },
7906 {
7907 .max = 1,
7908 .types = BIT(NL80211_IFTYPE_ADHOC),
7909 },
7910 };
7911
7912 /* FIXME: This is not thoroughly tested. These combinations may over- or
7913 * underestimate hw/fw capabilities.
7914 */
7915 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7916 {
7917 .limits = ath10k_tlv_if_limit,
7918 .num_different_channels = 1,
7919 .max_interfaces = 4,
7920 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7921 },
7922 {
7923 .limits = ath10k_tlv_if_limit_ibss,
7924 .num_different_channels = 1,
7925 .max_interfaces = 2,
7926 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7927 },
7928 };
7929
7930 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7931 {
7932 .limits = ath10k_tlv_if_limit,
7933 .num_different_channels = 1,
7934 .max_interfaces = 4,
7935 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7936 },
7937 {
7938 .limits = ath10k_tlv_qcs_if_limit,
7939 .num_different_channels = 2,
7940 .max_interfaces = 4,
7941 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7942 },
7943 {
7944 .limits = ath10k_tlv_if_limit_ibss,
7945 .num_different_channels = 1,
7946 .max_interfaces = 2,
7947 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7948 },
7949 };
7950
7951 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7952 {
7953 .max = 1,
7954 .types = BIT(NL80211_IFTYPE_STATION),
7955 },
7956 {
7957 .max = 16,
7958 .types = BIT(NL80211_IFTYPE_AP)
7959 #ifdef CONFIG_MAC80211_MESH
7960 | BIT(NL80211_IFTYPE_MESH_POINT)
7961 #endif
7962 },
7963 };
7964
7965 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7966 {
7967 .limits = ath10k_10_4_if_limits,
7968 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7969 .max_interfaces = 16,
7970 .num_different_channels = 1,
7971 .beacon_int_infra_match = true,
7972 .beacon_int_min_gcd = 1,
7973 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7974 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7975 BIT(NL80211_CHAN_WIDTH_20) |
7976 BIT(NL80211_CHAN_WIDTH_40) |
7977 BIT(NL80211_CHAN_WIDTH_80),
7978 #endif
7979 },
7980 };
7981
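/* Resolve a firmware vdev_id back to its ath10k_vif by walking the active
 * interfaces; typically needed when an event or completion only carries the
 * vdev_id.
 */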
7982 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7983 struct ieee80211_vif *vif)
7984 {
7985 struct ath10k_vif_iter *arvif_iter = data;
7986 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7987
7988 if (arvif->vdev_id == arvif_iter->vdev_id)
7989 arvif_iter->arvif = arvif;
7990 }
7991
7992 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7993 {
7994 struct ath10k_vif_iter arvif_iter;
7995 u32 flags;
7996
7997 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7998 arvif_iter.vdev_id = vdev_id;
7999
8000 flags = IEEE80211_IFACE_ITER_RESUME_ALL;
8001 ieee80211_iterate_active_interfaces_atomic(ar->hw,
8002 flags,
8003 ath10k_get_arvif_iter,
8004 &arvif_iter);
8005 if (!arvif_iter.arvif) {
8006 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
8007 return NULL;
8008 }
8009
8010 return arvif_iter.arvif;
8011 }
8012
8013 #define WRD_METHOD "WRDD"
8014 #define WRDD_WIFI (0x07)
8015
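/* Some platforms expose an ACPI "WRDD" object carrying a regulatory country
 * (MCC) code per wireless domain; WRDD_WIFI selects the Wi-Fi entry. The
 * value packs an ISO alpha2 in its low 16 bits, e.g. a hypothetical 0x5553
 * decodes as 'U' (0x55), 'S' (0x53) -> "US".
 */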
8016 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
8017 {
8018 union acpi_object *mcc_pkg;
8019 union acpi_object *domain_type;
8020 union acpi_object *mcc_value;
8021 u32 i;
8022
8023 if (wrdd->type != ACPI_TYPE_PACKAGE ||
8024 wrdd->package.count < 2 ||
8025 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
8026 wrdd->package.elements[0].integer.value != 0) {
8027 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
8028 return 0;
8029 }
8030
8031 for (i = 1; i < wrdd->package.count; ++i) {
8032 mcc_pkg = &wrdd->package.elements[i];
8033
8034 if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
8035 continue;
8036 if (mcc_pkg->package.count < 2)
8037 continue;
8038 if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
8039 mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
8040 continue;
8041
8042 domain_type = &mcc_pkg->package.elements[0];
8043 if (domain_type->integer.value != WRDD_WIFI)
8044 continue;
8045
8046 mcc_value = &mcc_pkg->package.elements[1];
8047 return mcc_value->integer.value;
8048 }
8049 return 0;
8050 }
8051
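/* Evaluate the ACPI WRDD method (if the platform provides one) and translate
 * the reported alpha2 into an ath regulatory domain code. Returns -EOPNOTSUPP
 * when there is no ACPI companion so callers can silently fall back.
 */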
8052 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
8053 {
8054 struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
8055 acpi_handle root_handle;
8056 acpi_handle handle;
8057 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
8058 acpi_status status;
8059 u32 alpha2_code;
8060 char alpha2[3];
8061
8062 root_handle = ACPI_HANDLE(&pdev->dev);
8063 if (!root_handle)
8064 return -EOPNOTSUPP;
8065
8066 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
8067 if (ACPI_FAILURE(status)) {
8068 ath10k_dbg(ar, ATH10K_DBG_BOOT,
8069 "failed to get wrd method %d\n", status);
8070 return -EIO;
8071 }
8072
8073 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
8074 if (ACPI_FAILURE(status)) {
8075 ath10k_dbg(ar, ATH10K_DBG_BOOT,
8076 "failed to call wrdc %d\n", status);
8077 return -EIO;
8078 }
8079
8080 alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
8081 kfree(wrdd.pointer);
8082 if (!alpha2_code)
8083 return -EIO;
8084
8085 alpha2[0] = (alpha2_code >> 8) & 0xff;
8086 alpha2[1] = (alpha2_code >> 0) & 0xff;
8087 alpha2[2] = '\0';
8088
8089 ath10k_dbg(ar, ATH10K_DBG_BOOT,
8090 "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
8091
8092 *rd = ath_regd_find_country_by_name(alpha2);
8093 if (*rd == 0xffff)
8094 return -EIO;
8095
8096 *rd |= COUNTRY_ERD_FLAG;
8097 return 0;
8098 }
8099
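/* Choose the initial regulatory domain: prefer the ACPI WRDD hint, otherwise
 * fall back to the value programmed in EEPROM (ar->hw_eeprom_rd).
 */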
8100 static int ath10k_mac_init_rd(struct ath10k *ar)
8101 {
8102 int ret;
8103 u16 rd;
8104
8105 ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
8106 if (ret) {
8107 ath10k_dbg(ar, ATH10K_DBG_BOOT,
8108 "fallback to eeprom programmed regulatory settings\n");
8109 rd = ar->hw_eeprom_rd;
8110 }
8111
8112 ar->ath_common.regulatory.current_rd = rd;
8113 return 0;
8114 }
8115
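/* Register the device with mac80211: build band/channel/rate tables, advertise
 * capabilities and interface combinations derived from firmware features, set
 * up DFS and regulatory handling, and finally call ieee80211_register_hw().
 */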
8116 int ath10k_mac_register(struct ath10k *ar)
8117 {
8118 static const u32 cipher_suites[] = {
8119 WLAN_CIPHER_SUITE_WEP40,
8120 WLAN_CIPHER_SUITE_WEP104,
8121 WLAN_CIPHER_SUITE_TKIP,
8122 WLAN_CIPHER_SUITE_CCMP,
8123 WLAN_CIPHER_SUITE_AES_CMAC,
8124 };
8125 struct ieee80211_supported_band *band;
8126 void *channels;
8127 int ret;
8128
8129 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
8130
8131 SET_IEEE80211_DEV(ar->hw, ar->dev);
8132
8133 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
8134 ARRAY_SIZE(ath10k_5ghz_channels)) !=
8135 ATH10K_NUM_CHANS);
8136
8137 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
8138 channels = kmemdup(ath10k_2ghz_channels,
8139 sizeof(ath10k_2ghz_channels),
8140 GFP_KERNEL);
8141 if (!channels) {
8142 ret = -ENOMEM;
8143 goto err_free;
8144 }
8145
8146 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
8147 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
8148 band->channels = channels;
8149
8150 if (ar->hw_params.cck_rate_map_rev2) {
8151 band->n_bitrates = ath10k_g_rates_rev2_size;
8152 band->bitrates = ath10k_g_rates_rev2;
8153 } else {
8154 band->n_bitrates = ath10k_g_rates_size;
8155 band->bitrates = ath10k_g_rates;
8156 }
8157
8158 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
8159 }
8160
8161 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
8162 channels = kmemdup(ath10k_5ghz_channels,
8163 sizeof(ath10k_5ghz_channels),
8164 GFP_KERNEL);
8165 if (!channels) {
8166 ret = -ENOMEM;
8167 goto err_free;
8168 }
8169
8170 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
8171 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
8172 band->channels = channels;
8173 band->n_bitrates = ath10k_a_rates_size;
8174 band->bitrates = ath10k_a_rates;
8175 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
8176 }
8177
8178 wiphy_read_of_freq_limits(ar->hw->wiphy);
8179 ath10k_mac_setup_ht_vht_cap(ar);
8180
8181 ar->hw->wiphy->interface_modes =
8182 BIT(NL80211_IFTYPE_STATION) |
8183 BIT(NL80211_IFTYPE_AP) |
8184 BIT(NL80211_IFTYPE_MESH_POINT);
8185
8186 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
8187 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
8188
8189 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
8190 ar->hw->wiphy->interface_modes |=
8191 BIT(NL80211_IFTYPE_P2P_DEVICE) |
8192 BIT(NL80211_IFTYPE_P2P_CLIENT) |
8193 BIT(NL80211_IFTYPE_P2P_GO);
8194
8195 ieee80211_hw_set(ar->hw, SIGNAL_DBM);
8196 ieee80211_hw_set(ar->hw, SUPPORTS_PS);
8197 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
8198 ieee80211_hw_set(ar->hw, MFP_CAPABLE);
8199 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
8200 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
8201 ieee80211_hw_set(ar->hw, AP_LINK_PS);
8202 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
8203 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
8204 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
8205 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
8206 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
8207 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
8208 ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
8209 ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
8210 ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
8211
8212 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8213 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
8214
8215 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
8216 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
8217
8218 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
8219 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
8220
8221 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
8222 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
8223 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
8224 }
8225
8226 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
8227 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
8228
8229 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
8230 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
8231 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
8232
8233 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
8234
8235 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
8236 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
8237
8238 /* Firmware delivers WPS/P2P Probe Requests frames to driver so
8239 * that userspace (e.g. wpa_supplicant/hostapd) can generate
8240 * correct Probe Responses. This is more of a workaround than real offload.
8241 */
8242 ar->hw->wiphy->probe_resp_offload |=
8243 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
8244 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
8245 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
8246 }
8247
8248 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
8249 test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
8250 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
8251 ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
8252 }
8253
8254 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
8255 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
8256 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
8257
8258 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
8259 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
8260 NL80211_FEATURE_AP_SCAN;
8261
8262 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
8263
8264 ret = ath10k_wow_init(ar);
8265 if (ret) {
8266 ath10k_warn(ar, "failed to init wow: %d\n", ret);
8267 goto err_free;
8268 }
8269
8270 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
8271
8272 /*
8273 * On LL (low-latency) hardware the queues are managed entirely by the
8274 * firmware, so only advertise to mac80211 that hardware queueing works.
8275 */
8276 ar->hw->queues = IEEE80211_MAX_QUEUES;
8277
8278 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
8279 * something that vdev_ids can't reach so that we don't stop the queue
8280 * accidentally.
8281 */
8282 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
8283
8284 switch (ar->running_fw->fw_file.wmi_op_version) {
8285 case ATH10K_FW_WMI_OP_VERSION_MAIN:
8286 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
8287 ar->hw->wiphy->n_iface_combinations =
8288 ARRAY_SIZE(ath10k_if_comb);
8289 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8290 break;
8291 case ATH10K_FW_WMI_OP_VERSION_TLV:
8292 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
8293 ar->hw->wiphy->iface_combinations =
8294 ath10k_tlv_qcs_if_comb;
8295 ar->hw->wiphy->n_iface_combinations =
8296 ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
8297 } else {
8298 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
8299 ar->hw->wiphy->n_iface_combinations =
8300 ARRAY_SIZE(ath10k_tlv_if_comb);
8301 }
8302 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8303 break;
8304 case ATH10K_FW_WMI_OP_VERSION_10_1:
8305 case ATH10K_FW_WMI_OP_VERSION_10_2:
8306 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
8307 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
8308 ar->hw->wiphy->n_iface_combinations =
8309 ARRAY_SIZE(ath10k_10x_if_comb);
8310 break;
8311 case ATH10K_FW_WMI_OP_VERSION_10_4:
8312 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
8313 ar->hw->wiphy->n_iface_combinations =
8314 ARRAY_SIZE(ath10k_10_4_if_comb);
8315 break;
8316 case ATH10K_FW_WMI_OP_VERSION_UNSET:
8317 case ATH10K_FW_WMI_OP_VERSION_MAX:
8318 WARN_ON(1);
8319 ret = -EINVAL;
8320 goto err_free;
8321 }
8322
8323 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8324 ar->hw->netdev_features = NETIF_F_HW_CSUM;
8325
8326 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
8327 /* Init ath dfs pattern detector */
8328 ar->ath_common.debug_mask = ATH_DBG_DFS;
8329 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
8330 NL80211_DFS_UNSET);
8331
8332 if (!ar->dfs_detector)
8333 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
8334 }
8335
8336 /* Current wake_tx_queue implementation imposes a significant
8337 * performance penalty in some setups. The tx scheduling code needs
8338 * more work anyway so disable the wake_tx_queue unless firmware
8339 * supports the pull-push mechanism.
8340 */
8341 if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
8342 ar->running_fw->fw_file.fw_features))
8343 ar->ops->wake_tx_queue = NULL;
8344
8345 ret = ath10k_mac_init_rd(ar);
8346 if (ret) {
8347 ath10k_err(ar, "failed to derive regdom: %d\n", ret);
8348 goto err_dfs_detector_exit;
8349 }
8350
8351 /* Disable set_coverage_class for chipsets that do not support it. */
8352 if (!ar->hw_params.hw_ops->set_coverage_class)
8353 ar->ops->set_coverage_class = NULL;
8354
8355 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
8356 ath10k_reg_notifier);
8357 if (ret) {
8358 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
8359 goto err_dfs_detector_exit;
8360 }
8361
8362 ar->hw->wiphy->cipher_suites = cipher_suites;
8363 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
8364
8365 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
8366
8367 ret = ieee80211_register_hw(ar->hw);
8368 if (ret) {
8369 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
8370 goto err_dfs_detector_exit;
8371 }
8372
8373 if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
8374 ret = regulatory_hint(ar->hw->wiphy,
8375 ar->ath_common.regulatory.alpha2);
8376 if (ret)
8377 goto err_unregister;
8378 }
8379
8380 return 0;
8381
8382 err_unregister:
8383 ieee80211_unregister_hw(ar->hw);
8384
8385 err_dfs_detector_exit:
8386 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8387 ar->dfs_detector->exit(ar->dfs_detector);
8388
8389 err_free:
8390 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8391 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8392
8393 SET_IEEE80211_DEV(ar->hw, NULL);
8394 return ret;
8395 }
8396
8397 void ath10k_mac_unregister(struct ath10k *ar)
8398 {
8399 ieee80211_unregister_hw(ar->hw);
8400
8401 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8402 ar->dfs_detector->exit(ar->dfs_detector);
8403
8404 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8405 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8406
8407 SET_IEEE80211_DEV(ar->hw, NULL);
8408 }
8409