1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4 */
5 #include <linux/sched.h>
6 #include <linux/of.h>
7 #include "mt76.h"
8
/* Initializer for a 2.4 GHz ieee80211_channel entry; _idx is the
 * channel number (hw_value), _freq the center frequency in MHz.
 * max_power is a 30 dBm cap; the regulatory core clamps it further.
 */
#define CHAN2G(_idx, _freq) {			\
	.band = NL80211_BAND_2GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}

/* Same as CHAN2G but for the 5 GHz band */
#define CHAN5G(_idx, _freq) {			\
	.band = NL80211_BAND_5GHZ,		\
	.center_freq = (_freq),			\
	.hw_value = (_idx),			\
	.max_power = 30,			\
}
22
/* Full 2.4 GHz channel list (1-14); disabled channels are pruned by the
 * regulatory code after registration.
 */
static const struct ieee80211_channel mt76_channels_2ghz[] = {
	CHAN2G(1, 2412),
	CHAN2G(2, 2417),
	CHAN2G(3, 2422),
	CHAN2G(4, 2427),
	CHAN2G(5, 2432),
	CHAN2G(6, 2437),
	CHAN2G(7, 2442),
	CHAN2G(8, 2447),
	CHAN2G(9, 2452),
	CHAN2G(10, 2457),
	CHAN2G(11, 2462),
	CHAN2G(12, 2467),
	CHAN2G(13, 2472),
	CHAN2G(14, 2484),
};
39
/* 5 GHz channel list, grouped by sub-band: UNII-1, UNII-2 (DFS),
 * UNII-2e (DFS), and UNII-3 including channel 169/173.
 */
static const struct ieee80211_channel mt76_channels_5ghz[] = {
	CHAN5G(36, 5180),
	CHAN5G(40, 5200),
	CHAN5G(44, 5220),
	CHAN5G(48, 5240),

	CHAN5G(52, 5260),
	CHAN5G(56, 5280),
	CHAN5G(60, 5300),
	CHAN5G(64, 5320),

	CHAN5G(100, 5500),
	CHAN5G(104, 5520),
	CHAN5G(108, 5540),
	CHAN5G(112, 5560),
	CHAN5G(116, 5580),
	CHAN5G(120, 5600),
	CHAN5G(124, 5620),
	CHAN5G(128, 5640),
	CHAN5G(132, 5660),
	CHAN5G(136, 5680),
	CHAN5G(140, 5700),
	CHAN5G(144, 5720),

	CHAN5G(149, 5745),
	CHAN5G(153, 5765),
	CHAN5G(157, 5785),
	CHAN5G(161, 5805),
	CHAN5G(165, 5825),
	CHAN5G(169, 5845),
	CHAN5G(173, 5865),
};
72
/* LED blink rate table for the mac80211 throughput trigger:
 * higher throughput (in kbit/s) maps to a shorter blink interval (ms).
 */
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
85
/* Shared legacy rate table: 4 CCK entries (2.4 GHz only) followed by
 * 8 OFDM entries. Callers registering the 5 GHz band skip the first
 * four entries (rates + 4). Second macro argument is the bitrate in
 * units of 100 kbit/s.
 */
struct ieee80211_rate mt76_rates[] = {
	CCK_RATE(0, 10),
	CCK_RATE(1, 20),
	CCK_RATE(2, 55),
	CCK_RATE(3, 110),
	OFDM_RATE(11, 60),
	OFDM_RATE(15, 90),
	OFDM_RATE(10, 120),
	OFDM_RATE(14, 180),
	OFDM_RATE(9, 240),
	OFDM_RATE(13, 360),
	OFDM_RATE(8, 480),
	OFDM_RATE(12, 540),
};
EXPORT_SYMBOL_GPL(mt76_rates);
101
mt76_led_init(struct mt76_dev * dev)102 static int mt76_led_init(struct mt76_dev *dev)
103 {
104 struct device_node *np = dev->dev->of_node;
105 struct ieee80211_hw *hw = dev->hw;
106 int led_pin;
107
108 if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
109 return 0;
110
111 snprintf(dev->led_name, sizeof(dev->led_name),
112 "mt76-%s", wiphy_name(hw->wiphy));
113
114 dev->led_cdev.name = dev->led_name;
115 dev->led_cdev.default_trigger =
116 ieee80211_create_tpt_led_trigger(hw,
117 IEEE80211_TPT_LEDTRIG_FL_RADIO,
118 mt76_tpt_blink,
119 ARRAY_SIZE(mt76_tpt_blink));
120
121 np = of_get_child_by_name(np, "led");
122 if (np) {
123 if (!of_property_read_u32(np, "led-sources", &led_pin))
124 dev->led_pin = led_pin;
125 dev->led_al = of_property_read_bool(np, "led-active-low");
126 of_node_put(np);
127 }
128
129 return led_classdev_register(dev->dev, &dev->led_cdev);
130 }
131
mt76_led_cleanup(struct mt76_dev * dev)132 static void mt76_led_cleanup(struct mt76_dev *dev)
133 {
134 if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
135 return;
136
137 led_classdev_unregister(&dev->led_cdev);
138 }
139
/* Refresh the stream-count dependent HT (and optionally VHT)
 * capabilities of @sband from the number of bits set in
 * phy->antenna_mask. Safe to call again after an antenna mask change.
 */
static void mt76_init_stream_cap(struct mt76_phy *phy,
				 struct ieee80211_supported_band *sband,
				 bool vht)
{
	struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
	int i, nstream = hweight8(phy->antenna_mask);
	struct ieee80211_sta_vht_cap *vht_cap;
	u16 mcs_map = 0;

	/* TX STBC requires at least two spatial streams */
	if (nstream > 1)
		ht_cap->cap |= IEEE80211_HT_CAP_TX_STBC;
	else
		ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;

	/* advertise HT MCS 0-7 for each available RX stream */
	for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
		ht_cap->mcs.rx_mask[i] = i < nstream ? 0xff : 0;

	if (!vht)
		return;

	vht_cap = &sband->vht_cap;
	if (nstream > 1)
		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
	else
		vht_cap->cap &= ~IEEE80211_VHT_CAP_TXSTBC;

	/* 2 bits per stream: MCS 0-9 when present, NOT_SUPPORTED otherwise */
	for (i = 0; i < 8; i++) {
		if (i < nstream)
			mcs_map |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2));
		else
			mcs_map |=
				(IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2));
	}
	vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
	vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
}
176
/* Re-derive stream-dependent capabilities on every band the phy
 * supports. 2.4 GHz never carries VHT; 5 GHz honors the caller's
 * @vht flag.
 */
void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
{
	struct mt76_sband *msband;

	if (phy->cap.has_2ghz) {
		msband = &phy->sband_2g;
		mt76_init_stream_cap(phy, &msband->sband, false);
	}

	if (phy->cap.has_5ghz) {
		msband = &phy->sband_5g;
		mt76_init_stream_cap(phy, &msband->sband, vht);
	}
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
185
/* Populate @msband for registration with mac80211: duplicate the
 * (const) channel template into writable device-managed memory,
 * allocate per-channel state, attach the rate table and fill in
 * baseline HT (and optionally VHT) capabilities.
 *
 * Returns 0 on success or -ENOMEM. Allocations are devm-managed, so
 * nothing needs undoing on the error path.
 */
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
		const struct ieee80211_channel *chan, int n_chan,
		struct ieee80211_rate *rates, int n_rates, bool vht)
{
	struct ieee80211_supported_band *sband = &msband->sband;
	struct ieee80211_sta_vht_cap *vht_cap;
	struct ieee80211_sta_ht_cap *ht_cap;
	struct mt76_dev *dev = phy->dev;
	void *chanlist;
	int size;

	/* writable copy: the regulatory core modifies channel flags */
	size = n_chan * sizeof(*chan);
	chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL);
	if (!chanlist)
		return -ENOMEM;

	msband->chan = devm_kcalloc(dev->dev, n_chan, sizeof(*msband->chan),
				    GFP_KERNEL);
	if (!msband->chan)
		return -ENOMEM;

	sband->channels = chanlist;
	sband->n_channels = n_chan;
	sband->bitrates = rates;
	sband->n_bitrates = n_rates;

	ht_cap = &sband->ht_cap;
	ht_cap->ht_supported = true;
	ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_GRN_FLD |
		       IEEE80211_HT_CAP_SGI_20 |
		       IEEE80211_HT_CAP_SGI_40 |
		       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);

	ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;

	/* fill in antenna-count dependent fields (STBC, MCS maps) */
	mt76_init_stream_cap(phy, sband, vht);

	if (!vht)
		return 0;

	vht_cap = &sband->vht_cap;
	vht_cap->vht_supported = true;
	vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
			IEEE80211_VHT_CAP_RXSTBC_1 |
			IEEE80211_VHT_CAP_SHORT_GI_80 |
			IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
			IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
			(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);

	return 0;
}
240
241 static int
mt76_init_sband_2g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates)242 mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
243 int n_rates)
244 {
245 phy->hw->wiphy->bands[NL80211_BAND_2GHZ] = &phy->sband_2g.sband;
246
247 return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
248 ARRAY_SIZE(mt76_channels_2ghz), rates,
249 n_rates, false);
250 }
251
252 static int
mt76_init_sband_5g(struct mt76_phy * phy,struct ieee80211_rate * rates,int n_rates,bool vht)253 mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
254 int n_rates, bool vht)
255 {
256 phy->hw->wiphy->bands[NL80211_BAND_5GHZ] = &phy->sband_5g.sband;
257
258 return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
259 ARRAY_SIZE(mt76_channels_5ghz), rates,
260 n_rates, vht);
261 }
262
263 static void
mt76_check_sband(struct mt76_phy * phy,struct mt76_sband * msband,enum nl80211_band band)264 mt76_check_sband(struct mt76_phy *phy, struct mt76_sband *msband,
265 enum nl80211_band band)
266 {
267 struct ieee80211_supported_band *sband = &msband->sband;
268 bool found = false;
269 int i;
270
271 if (!sband)
272 return;
273
274 for (i = 0; i < sband->n_channels; i++) {
275 if (sband->channels[i].flags & IEEE80211_CHAN_DISABLED)
276 continue;
277
278 found = true;
279 break;
280 }
281
282 if (found) {
283 phy->chandef.chan = &sband->channels[0];
284 phy->chan_state = &msband->chan[0];
285 return;
286 }
287
288 sband->n_channels = 0;
289 phy->hw->wiphy->bands[band] = NULL;
290 }
291
/* Apply the common mt76 mac80211 configuration to @hw: permanent MAC
 * address, wiphy feature flags, antenna masks and the standard set of
 * ieee80211_hw capability bits. Called for both the primary phy
 * (mt76_register_device) and secondary phys (mt76_register_phy).
 */
static void
mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
{
	struct mt76_dev *dev = phy->dev;
	struct wiphy *wiphy = hw->wiphy;

	SET_IEEE80211_DEV(hw, dev->dev);
	SET_IEEE80211_PERM_ADDR(hw, phy->macaddr);

	wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
			WIPHY_FLAG_SUPPORTS_TDLS |
			WIPHY_FLAG_AP_UAPSD;

	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AQL);

	/* NOTE(review): uses the primary phy's antenna mask even for
	 * secondary phys — presumably all phys share the antenna set */
	wiphy->available_antennas_tx = dev->phy.antenna_mask;
	wiphy->available_antennas_rx = dev->phy.antenna_mask;

	hw->txq_data_size = sizeof(struct mt76_txq);
	hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;

	if (!hw->max_tx_fragments)
		hw->max_tx_fragments = 16;

	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	/* hardware A-MSDU offload replaces mac80211's software path */
	if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
		ieee80211_hw_set(hw, TX_AMSDU);
		ieee80211_hw_set(hw, TX_FRAG_LIST);
	}

	ieee80211_hw_set(hw, MFP_CAPABLE);
	ieee80211_hw_set(hw, AP_LINK_PS);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
}
336
/* Allocate an ieee80211_hw plus a secondary mt76_phy for a multi-radio
 * device. @size is the driver's private data size, appended after the
 * (8-byte aligned) phy and reachable via phy->priv. Returns NULL on
 * allocation failure.
 */
struct mt76_phy *
mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
	       const struct ieee80211_ops *ops)
{
	struct ieee80211_hw *hw;
	unsigned int phy_size;
	struct mt76_phy *phy;

	phy_size = ALIGN(sizeof(*phy), 8);
	hw = ieee80211_alloc_hw(size + phy_size, ops);
	if (!hw)
		return NULL;

	phy = hw->priv;
	phy->dev = dev;
	phy->hw = hw;
	/* driver private area lives right after the aligned phy struct */
	phy->priv = hw->priv + phy_size;

	/* keep in sync with the interface modes in mt76_alloc_device() */
	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_ADHOC);

	return phy;
}
EXPORT_SYMBOL_GPL(mt76_alloc_phy);
368 EXPORT_SYMBOL_GPL(mt76_alloc_phy);
369
/* Register a secondary phy (allocated by mt76_alloc_phy) with mac80211:
 * set up bands, apply DT frequency limits, drop empty bands, then
 * register the hw and publish it as dev->phy2. Returns 0 or a negative
 * errno; band allocations are devm-managed so no unwind is needed.
 */
int mt76_register_phy(struct mt76_phy *phy, bool vht,
		      struct ieee80211_rate *rates, int n_rates)
{
	int ret;

	mt76_phy_init(phy, phy->hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the four leading CCK entries of the rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(phy->hw->wiphy);
	mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);

	ret = ieee80211_register_hw(phy->hw);
	if (ret)
		return ret;

	phy->dev->phy2 = phy;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_phy);
402
/* Tear down a secondary phy: flush pending tx status work before
 * unregistering from mac80211, then clear the dev->phy2 pointer.
 */
void mt76_unregister_phy(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;

	mt76_tx_status_check(dev, NULL, true);
	ieee80211_unregister_hw(phy->hw);
	dev->phy2 = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
412
413 struct mt76_dev *
mt76_alloc_device(struct device * pdev,unsigned int size,const struct ieee80211_ops * ops,const struct mt76_driver_ops * drv_ops)414 mt76_alloc_device(struct device *pdev, unsigned int size,
415 const struct ieee80211_ops *ops,
416 const struct mt76_driver_ops *drv_ops)
417 {
418 struct ieee80211_hw *hw;
419 struct mt76_phy *phy;
420 struct mt76_dev *dev;
421 int i;
422
423 hw = ieee80211_alloc_hw(size, ops);
424 if (!hw)
425 return NULL;
426
427 dev = hw->priv;
428 dev->hw = hw;
429 dev->dev = pdev;
430 dev->drv = drv_ops;
431 dev->dma_dev = pdev;
432
433 phy = &dev->phy;
434 phy->dev = dev;
435 phy->hw = hw;
436
437 spin_lock_init(&dev->rx_lock);
438 spin_lock_init(&dev->lock);
439 spin_lock_init(&dev->cc_lock);
440 mutex_init(&dev->mutex);
441 init_waitqueue_head(&dev->tx_wait);
442 skb_queue_head_init(&dev->status_list);
443
444 skb_queue_head_init(&dev->mcu.res_q);
445 init_waitqueue_head(&dev->mcu.wait);
446 mutex_init(&dev->mcu.mutex);
447 dev->tx_worker.fn = mt76_tx_worker;
448
449 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
450 hw->wiphy->interface_modes =
451 BIT(NL80211_IFTYPE_STATION) |
452 BIT(NL80211_IFTYPE_AP) |
453 #ifdef CONFIG_MAC80211_MESH
454 BIT(NL80211_IFTYPE_MESH_POINT) |
455 #endif
456 BIT(NL80211_IFTYPE_P2P_CLIENT) |
457 BIT(NL80211_IFTYPE_P2P_GO) |
458 BIT(NL80211_IFTYPE_ADHOC);
459
460 spin_lock_init(&dev->token_lock);
461 idr_init(&dev->token);
462
463 INIT_LIST_HEAD(&dev->txwi_cache);
464
465 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
466 skb_queue_head_init(&dev->rx_skb[i]);
467
468 dev->wq = alloc_ordered_workqueue("mt76", 0);
469 if (!dev->wq) {
470 ieee80211_free_hw(hw);
471 return NULL;
472 }
473
474 return dev;
475 }
476 EXPORT_SYMBOL_GPL(mt76_alloc_device);
477
/* Register the primary phy with mac80211: initialize bands, apply DT
 * frequency limits, set up the LED (if CONFIG_MT76_LEDS), register the
 * hw and start the tx worker. Returns 0 or a negative errno.
 *
 * Fix: if ieee80211_register_hw() fails after mt76_led_init()
 * succeeded, the LED classdev used to be leaked; it is now
 * unregistered on that error path.
 */
int mt76_register_device(struct mt76_dev *dev, bool vht,
			 struct ieee80211_rate *rates, int n_rates)
{
	struct ieee80211_hw *hw = dev->hw;
	struct mt76_phy *phy = &dev->phy;
	int ret;

	dev_set_drvdata(dev->dev, dev);
	mt76_phy_init(phy, hw);

	if (phy->cap.has_2ghz) {
		ret = mt76_init_sband_2g(phy, rates, n_rates);
		if (ret)
			return ret;
	}

	if (phy->cap.has_5ghz) {
		/* skip the four leading CCK entries of the rate table */
		ret = mt76_init_sband_5g(phy, rates + 4, n_rates - 4, vht);
		if (ret)
			return ret;
	}

	wiphy_read_of_freq_limits(hw->wiphy);
	mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
	mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);

	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		ret = mt76_led_init(dev);
		if (ret)
			return ret;
	}

	ret = ieee80211_register_hw(hw);
	if (ret)
		goto out_led;

	WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
	sched_set_fifo_low(dev->tx_worker.task);

	return 0;

out_led:
	/* undo the LED registration done above */
	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
520
/* Unregister the primary phy: drop the LED first, flush pending tx
 * status work, then detach from mac80211. Counterpart of
 * mt76_register_device(); memory is released by mt76_free_device().
 */
void mt76_unregister_device(struct mt76_dev *dev)
{
	struct ieee80211_hw *hw = dev->hw;

	if (IS_ENABLED(CONFIG_MT76_LEDS))
		mt76_led_cleanup(dev);
	mt76_tx_status_check(dev, NULL, true);
	ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
531
mt76_free_device(struct mt76_dev * dev)532 void mt76_free_device(struct mt76_dev *dev)
533 {
534 mt76_worker_teardown(&dev->tx_worker);
535 if (dev->wq) {
536 destroy_workqueue(dev->wq);
537 dev->wq = NULL;
538 }
539 ieee80211_free_hw(dev->hw);
540 }
541 EXPORT_SYMBOL_GPL(mt76_free_device);
542
/* Hand the currently buffered A-MSDU (head skb plus frag_list of
 * subframes) for rx queue @q on to the per-device rx queue, after
 * validating the first subframe.
 */
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
	struct sk_buff *skb = phy->rx_amsdu[q].head;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_dev *dev = phy->dev;

	/* detach the burst so the next frame starts a fresh A-MSDU */
	phy->rx_amsdu[q].head = NULL;
	phy->rx_amsdu[q].tail = NULL;

	/*
	 * Validate if the amsdu has a proper first subframe.
	 * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
	 * flag of the QoS header gets flipped. In such cases, the first
	 * subframe has a LLC/SNAP header in the location of the destination
	 * address.
	 */
	if (skb_shinfo(skb)->frag_list) {
		int offset = 0;

		if (!(status->flag & RX_FLAG_8023)) {
			/* 802.11 framing: DA sits after the MAC header
			 * (plus 8 bytes of IV/CCMP header if the frame
			 * was decrypted without IV stripping) */
			offset = ieee80211_get_hdrlen_from_skb(skb);

			if ((status->flag &
			     (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
			    RX_FLAG_DECRYPTED)
				offset += 8;
		}

		/* LLC/SNAP where the DA should be => spoofed A-MSDU, drop */
		if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
			dev_kfree_skb(skb);
			return;
		}
	}
	__skb_queue_tail(&dev->rx_skb[q], skb);
}
578
/* Collect hardware-deaggregated A-MSDU subframes into a single skb
 * (head + frag_list) before releasing them upstream. Non-A-MSDU frames
 * pass straight through; a sequence-number change or an explicit
 * first-subframe marker flushes any burst in progress.
 */
static void mt76_rx_release_burst(struct mt76_phy *phy, enum mt76_rxq_id q,
				  struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;

	/* flush a pending burst that this frame does not belong to */
	if (phy->rx_amsdu[q].head &&
	    (!status->amsdu || status->first_amsdu ||
	     status->seqno != phy->rx_amsdu[q].seqno))
		mt76_rx_release_amsdu(phy, q);

	if (!phy->rx_amsdu[q].head) {
		/* start a new burst; tail points at head's frag_list */
		phy->rx_amsdu[q].tail = &skb_shinfo(skb)->frag_list;
		phy->rx_amsdu[q].seqno = status->seqno;
		phy->rx_amsdu[q].head = skb;
	} else {
		/* append subframe to the chain */
		*phy->rx_amsdu[q].tail = skb;
		phy->rx_amsdu[q].tail = &skb->next;
	}

	/* single frame, or last subframe of the A-MSDU: release it */
	if (!status->amsdu || status->last_amsdu)
		mt76_rx_release_amsdu(phy, q);
}
601
/* Driver entry point for received frames. Drops frames while the phy
 * is not running, updates testmode rx counters, then feeds the frame
 * into A-MSDU burst collection. skb->cb must hold a mt76_rx_status.
 */
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *phy = mt76_dev_phy(dev, status->ext_phy);

	if (!test_bit(MT76_STATE_RUNNING, &phy->state)) {
		dev_kfree_skb(skb);
		return;
	}

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->test.state == MT76_TM_STATE_RX_FRAMES) {
		phy->test.rx_stats.packets[q]++;
		if (status->flag & RX_FLAG_FAILED_FCS_CRC)
			phy->test.rx_stats.fcs_error[q]++;
	}
#endif

	mt76_rx_release_burst(phy, q, skb);
}
EXPORT_SYMBOL_GPL(mt76_rx);
623
mt76_has_tx_pending(struct mt76_phy * phy)624 bool mt76_has_tx_pending(struct mt76_phy *phy)
625 {
626 struct mt76_queue *q;
627 int i;
628
629 for (i = 0; i < __MT_TXQ_MAX; i++) {
630 q = phy->q_tx[i];
631 if (q && q->queued)
632 return true;
633 }
634
635 return false;
636 }
637 EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
638
639 static struct mt76_channel_state *
mt76_channel_state(struct mt76_phy * phy,struct ieee80211_channel * c)640 mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
641 {
642 struct mt76_sband *msband;
643 int idx;
644
645 if (c->band == NL80211_BAND_2GHZ)
646 msband = &phy->sband_2g;
647 else
648 msband = &phy->sband_5g;
649
650 idx = c - &msband->sband.channels[0];
651 return &msband->chan[idx];
652 }
653
/* Credit the time elapsed since the last survey snapshot to the
 * current channel's active time and restart the measurement window.
 */
void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
{
	ktime_t active = ktime_sub(time, phy->survey_time);

	phy->chan_state->cc_active += ktime_to_us(active);
	phy->survey_time = time;
}
EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
663
/* Refresh channel survey statistics: let the driver pull hardware
 * counters, account active time since the last update, and (for
 * drivers doing software rx airtime accounting) fold the accumulated
 * BSS rx airtime into the channel state under cc_lock.
 */
void mt76_update_survey(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	ktime_t cur_time;

	if (dev->drv->update_survey)
		dev->drv->update_survey(phy);

	cur_time = ktime_get_boottime();
	mt76_update_survey_active_time(phy, cur_time);

	if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME) {
		struct mt76_channel_state *state = phy->chan_state;

		spin_lock_bh(&dev->cc_lock);
		state->cc_bss_rx += dev->cur_cc_bss_rx;
		dev->cur_cc_bss_rx = 0;
		spin_unlock_bh(&dev->cc_lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_update_survey);
685
/* Common channel-switch bookkeeping: wait (bounded) for tx queues to
 * drain, close the survey window on the old channel, then switch the
 * phy's chandef and channel state to the configured channel.
 */
void mt76_set_channel(struct mt76_phy *phy)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_hw *hw = phy->hw;
	struct cfg80211_chan_def *chandef = &hw->conf.chandef;
	bool offchannel = hw->conf.flags & IEEE80211_CONF_OFFCHANNEL;
	int timeout = HZ / 5;

	/* give pending tx up to 200ms to drain before switching */
	wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(phy), timeout);
	mt76_update_survey(phy);

	phy->chandef = *chandef;
	phy->chan_state = mt76_channel_state(phy, chandef->chan);

	/* off-channel visits (scan) do not change the operating channel */
	if (!offchannel)
		phy->main_chan = chandef->chan;

	/* reset survey counters when leaving the operating channel */
	if (chandef->chan != phy->main_chan)
		memset(phy->chan_state, 0, sizeof(*phy->chan_state));
}
EXPORT_SYMBOL_GPL(mt76_set_channel);
707
/* mac80211 get_survey callback: report accumulated channel counters
 * for the @idx-th channel, counting across the 2.4 GHz band first and
 * the 5 GHz band second. Returns -ENOENT past the last channel.
 */
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
		    struct survey_info *survey)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct mt76_sband *sband;
	struct ieee80211_channel *chan;
	struct mt76_channel_state *state;
	int ret = 0;

	mutex_lock(&dev->mutex);
	/* refresh the counters once per dump, on the first channel */
	if (idx == 0 && dev->drv->update_survey)
		mt76_update_survey(phy);

	/* flat index over 2g channels, then 5g channels */
	sband = &phy->sband_2g;
	if (idx >= sband->sband.n_channels) {
		idx -= sband->sband.n_channels;
		sband = &phy->sband_5g;
	}

	if (idx >= sband->sband.n_channels) {
		ret = -ENOENT;
		goto out;
	}

	chan = &sband->sband.channels[idx];
	state = mt76_channel_state(phy, chan);

	memset(survey, 0, sizeof(*survey));
	survey->channel = chan;
	survey->filled = SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY;
	survey->filled |= dev->drv->survey_flags;
	if (state->noise)
		survey->filled |= SURVEY_INFO_NOISE_DBM;

	if (chan == phy->main_chan) {
		survey->filled |= SURVEY_INFO_IN_USE;

		if (dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME)
			survey->filled |= SURVEY_INFO_TIME_BSS_RX;
	}

	/* internal counters are in microseconds; survey wants ms */
	survey->time_busy = div_u64(state->cc_busy, 1000);
	survey->time_rx = div_u64(state->cc_rx, 1000);
	survey->time = div_u64(state->cc_active, 1000);
	survey->noise = state->noise;

	/* bss_rx/tx counters are updated under cc_lock elsewhere */
	spin_lock_bh(&dev->cc_lock);
	survey->time_bss_rx = div_u64(state->cc_bss_rx, 1000);
	survey->time_tx = div_u64(state->cc_tx, 1000);
	spin_unlock_bh(&dev->cc_lock);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76_get_survey);
766
mt76_wcid_key_setup(struct mt76_dev * dev,struct mt76_wcid * wcid,struct ieee80211_key_conf * key)767 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
768 struct ieee80211_key_conf *key)
769 {
770 struct ieee80211_key_seq seq;
771 int i;
772
773 wcid->rx_check_pn = false;
774
775 if (!key)
776 return;
777
778 if (key->cipher != WLAN_CIPHER_SUITE_CCMP)
779 return;
780
781 wcid->rx_check_pn = true;
782 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
783 ieee80211_get_key_rx_seq(key, i, &seq);
784 memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
785 }
786 }
787 EXPORT_SYMBOL(mt76_wcid_key_setup);
788
/* Convert the driver-private mt76_rx_status stored in skb->cb into the
 * mac80211 ieee80211_rx_status expected at that location, and resolve
 * the destination hw and station. Must copy mstat out first because
 * both structs alias the same cb[] bytes.
 */
static void
mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
		struct ieee80211_hw **hw,
		struct ieee80211_sta **sta)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
	struct mt76_rx_status mstat;

	mstat = *((struct mt76_rx_status *)skb->cb);
	memset(status, 0, sizeof(*status));

	status->flag = mstat.flag;
	status->freq = mstat.freq;
	status->enc_flags = mstat.enc_flags;
	status->encoding = mstat.encoding;
	status->bw = mstat.bw;
	status->he_ru = mstat.he_ru;
	status->he_gi = mstat.he_gi;
	status->he_dcm = mstat.he_dcm;
	status->rate_idx = mstat.rate_idx;
	status->nss = mstat.nss;
	status->band = mstat.band;
	status->signal = mstat.signal;
	status->chains = mstat.chains;
	status->ampdu_reference = mstat.ampdu_ref;
	status->device_timestamp = mstat.timestamp;
	status->mactime = mstat.timestamp;

	/* compile-time guarantees that the cb aliasing above is safe */
	BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
	BUILD_BUG_ON(sizeof(status->chain_signal) !=
		     sizeof(mstat.chain_signal));
	memcpy(status->chain_signal, mstat.chain_signal,
	       sizeof(mstat.chain_signal));

	*sta = wcid_to_sta(mstat.wcid);
	*hw = mt76_phy_hw(dev, mstat.ext_phy);
}
826
/* Software CCMP packet-number replay check. Returns 0 when the frame
 * is acceptable (or no check applies) and -EINVAL for a replayed PN.
 * On acceptance, the stored PN for the frame's TID is advanced.
 */
static int
mt76_check_ccmp_pn(struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_hdr *hdr;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	int ret;

	if (!(status->flag & RX_FLAG_DECRYPTED))
		return 0;

	if (!wcid || !wcid->rx_check_pn)
		return 0;

	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
		/*
		 * Validate the first fragment both here and in mac80211
		 * All further fragments will be validated by mac80211 only.
		 */
		hdr = mt76_skb_get_hdr(skb);
		if (ieee80211_is_frag(hdr) &&
		    !ieee80211_is_first_frag(hdr->frame_control))
			return 0;
	}

	BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
	/* PN must be strictly increasing; equal also counts as replay */
	ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
		     sizeof(status->iv));
	if (ret <= 0)
		return -EINVAL; /* replay */

	memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));

	/* with the IV stripped, mac80211 cannot re-check; mark validated */
	if (status->flag & RX_FLAG_IV_STRIPPED)
		status->flag |= RX_FLAG_PN_VALIDATED;

	return 0;
}
866
/* Estimate the rx airtime of a frame of @len bytes from its rate info
 * and charge it to the device-wide BSS rx counter and, when the frame
 * belongs to a known station, to that station's airtime fairness
 * accounting in mac80211.
 */
static void
mt76_airtime_report(struct mt76_dev *dev, struct mt76_rx_status *status,
		    int len)
{
	struct mt76_wcid *wcid = status->wcid;
	struct ieee80211_rx_status info = {
		.enc_flags = status->enc_flags,
		.rate_idx = status->rate_idx,
		.encoding = status->encoding,
		.band = status->band,
		.nss = status->nss,
		.bw = status->bw,
	};
	struct ieee80211_sta *sta;
	u32 airtime;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;

	airtime = ieee80211_calc_rx_airtime(dev->hw, &info, len);
	spin_lock(&dev->cc_lock);
	dev->cur_cc_bss_rx += airtime;
	spin_unlock(&dev->cc_lock);

	if (!wcid || !wcid->sta)
		return;

	/* wcid is embedded in the station's drv_priv area */
	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
}
895
/* Report the airtime accumulated for the current rx A-MPDU as a single
 * frame of the combined length, then reset the aggregation state.
 * The wcid is re-resolved by index since the station may have been
 * removed since the A-MPDU started.
 */
static void
mt76_airtime_flush_ampdu(struct mt76_dev *dev)
{
	struct mt76_wcid *wcid;
	int wcid_idx;

	if (!dev->rx_ampdu_len)
		return;

	wcid_idx = dev->rx_ampdu_status.wcid_idx;
	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);
	else
		wcid = NULL;
	dev->rx_ampdu_status.wcid = wcid;

	mt76_airtime_report(dev, &dev->rx_ampdu_status, dev->rx_ampdu_len);

	dev->rx_ampdu_len = 0;
	dev->rx_ampdu_ref = 0;
}
917
/* Software rx airtime accounting hook, called per received frame.
 * Frames that are part of an A-MPDU are accumulated by length and
 * flushed as one report when the aggregate changes; standalone frames
 * are reported immediately. Only active when the driver sets
 * MT_DRV_SW_RX_AIRTIME.
 */
static void
mt76_airtime_check(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_wcid *wcid = status->wcid;

	if (!(dev->drv->drv_flags & MT_DRV_SW_RX_AIRTIME))
		return;

	if (!wcid || !wcid->sta) {
		struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);

		/* 802.3-decapped frames have no addr1 to match against */
		if (status->flag & RX_FLAG_8023)
			return;

		/* only count unknown-sender frames addressed to us */
		if (!ether_addr_equal(hdr->addr1, dev->phy.macaddr))
			return;

		wcid = NULL;
	}

	/* a new (or non-) A-MPDU closes the previous aggregate */
	if (!(status->flag & RX_FLAG_AMPDU_DETAILS) ||
	    status->ampdu_ref != dev->rx_ampdu_ref)
		mt76_airtime_flush_ampdu(dev);

	if (status->flag & RX_FLAG_AMPDU_DETAILS) {
		if (!dev->rx_ampdu_len ||
		    status->ampdu_ref != dev->rx_ampdu_ref) {
			/* first subframe: snapshot status for the report */
			dev->rx_ampdu_status = *status;
			dev->rx_ampdu_status.wcid_idx = wcid ? wcid->idx : 0xff;
			dev->rx_ampdu_ref = status->ampdu_ref;
		}

		dev->rx_ampdu_len += skb->len;
		return;
	}

	mt76_airtime_report(dev, status, skb->len);
}
957
/* Per-frame station bookkeeping on the rx path: resolve the wcid for
 * PS-poll frames from unknown senders, feed airtime accounting, update
 * RSSI/inactivity, and mirror the peer's power-save state transitions
 * (including U-APSD triggers) into mac80211 for drivers that need
 * software PS tracking (MT_WCID_FLAG_CHECK_PS).
 */
static void
mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct mt76_wcid *wcid = status->wcid;
	u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
	bool ps;

	hw = mt76_phy_hw(dev, status->ext_phy);
	/* PS-poll may arrive before the wcid lookup succeeded in hw */
	if (ieee80211_is_pspoll(hdr->frame_control) && !wcid &&
	    !(status->flag & RX_FLAG_8023)) {
		sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
		if (sta)
			wcid = status->wcid = (struct mt76_wcid *)sta->drv_priv;
	}

	mt76_airtime_check(dev, skb);

	if (!wcid || !wcid->sta)
		return;

	sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);

	/* signal is <= 0 dBm; ewma stores its magnitude */
	if (status->signal <= 0)
		ewma_signal_add(&wcid->rssi, -status->signal);

	wcid->inactive_count = 0;

	/* no 802.11 header below: PS logic needs frame_control bits */
	if (status->flag & RX_FLAG_8023)
		return;

	if (!test_bit(MT_WCID_FLAG_CHECK_PS, &wcid->flags))
		return;

	if (ieee80211_is_pspoll(hdr->frame_control)) {
		ieee80211_sta_pspoll(sta);
		return;
	}

	/* only complete mgmt/data frames carry a meaningful PM bit */
	if (ieee80211_has_morefrags(hdr->frame_control) ||
	    !(ieee80211_is_mgmt(hdr->frame_control) ||
	      ieee80211_is_data(hdr->frame_control)))
		return;

	ps = ieee80211_has_pm(hdr->frame_control);

	if (ps && (ieee80211_is_data_qos(hdr->frame_control) ||
		   ieee80211_is_qos_nullfunc(hdr->frame_control)))
		ieee80211_sta_uapsd_trigger(sta, tidno);

	/* no state change: nothing more to do */
	if (!!test_bit(MT_WCID_FLAG_PS, &wcid->flags) == ps)
		return;

	if (ps)
		set_bit(MT_WCID_FLAG_PS, &wcid->flags);
	else
		clear_bit(MT_WCID_FLAG_PS, &wcid->flags);

	dev->drv->sta_ps(dev, sta, ps);
	ieee80211_sta_ps_transition(sta, ps);
}
1022
/* Deliver a batch of processed rx frames to mac80211. Each head skb
 * may carry A-MSDU subframes in its frag_list; the list is detached
 * and each subframe is delivered individually. A failed CCMP PN check
 * drops the head (and with it the whole A-MSDU). Delivery goes
 * through @napi GRO when available, plain netif otherwise.
 */
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
		      struct napi_struct *napi)
{
	struct ieee80211_sta *sta;
	struct ieee80211_hw *hw;
	struct sk_buff *skb, *tmp;
	LIST_HEAD(list);

	spin_lock(&dev->rx_lock);
	while ((skb = __skb_dequeue(frames)) != NULL) {
		struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

		if (mt76_check_ccmp_pn(skb)) {
			dev_kfree_skb(skb);
			continue;
		}

		/* detach subframes before handing the head to mac80211 */
		skb_shinfo(skb)->frag_list = NULL;
		mt76_rx_convert(dev, skb, &hw, &sta);
		ieee80211_rx_list(hw, sta, skb, &list);

		/* subsequent amsdu frames */
		while (nskb) {
			skb = nskb;
			nskb = nskb->next;
			skb->next = NULL;

			mt76_rx_convert(dev, skb, &hw, &sta);
			ieee80211_rx_list(hw, sta, skb, &list);
		}
	}
	spin_unlock(&dev->rx_lock);

	if (!napi) {
		netif_receive_skb_list(&list);
		return;
	}

	list_for_each_entry_safe(skb, tmp, &list, list) {
		skb_list_del_init(skb);
		napi_gro_receive(napi, skb);
	}
}
1066
mt76_rx_poll_complete(struct mt76_dev * dev,enum mt76_rxq_id q,struct napi_struct * napi)1067 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
1068 struct napi_struct *napi)
1069 {
1070 struct sk_buff_head frames;
1071 struct sk_buff *skb;
1072
1073 __skb_queue_head_init(&frames);
1074
1075 while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
1076 mt76_check_sta(dev, skb);
1077 mt76_rx_aggr_reorder(skb, &frames);
1078 }
1079
1080 mt76_rx_complete(dev, &frames, napi);
1081 }
1082 EXPORT_SYMBOL_GPL(mt76_rx_poll_complete);
1083
/* Add a station: let the driver allocate hw resources, wire the
 * station's txqs to its wcid index, and publish the wcid via RCU.
 * @ext_phy marks stations belonging to the secondary phy. Returns the
 * driver's sta_add() result. Serialized by dev->mutex.
 */
static int
mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
	     struct ieee80211_sta *sta, bool ext_phy)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int ret;
	int i;

	mutex_lock(&dev->mutex);

	ret = dev->drv->sta_add(dev, vif, sta);
	if (ret)
		goto out;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct mt76_txq *mtxq;

		if (!sta->txq[i])
			continue;

		mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
		mtxq->wcid = wcid->idx;
	}

	ewma_signal_init(&wcid->rssi);
	if (ext_phy)
		mt76_wcid_mask_set(dev->wcid_phy_mask, wcid->idx);
	wcid->ext_phy = ext_phy;
	/* publish last, once the wcid is fully initialized */
	rcu_assign_pointer(dev->wcid[wcid->idx], wcid);

out:
	mutex_unlock(&dev->mutex);

	return ret;
}
1119
/* Tear down driver state for a station (caller holds dev->mutex).
 *
 * @dev: mt76 device
 * @vif: owning virtual interface
 * @sta: mac80211 station being removed
 *
 * Stops rx aggregation, invokes the driver's optional sta_remove callback,
 * flushes pending tx status and clears the wcid allocation masks.
 */
void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	int i, idx = wcid->idx;

	/* stop every rx-aggregation (BA) session before freeing state */
	for (i = 0; i < ARRAY_SIZE(wcid->aggr); i++)
		mt76_rx_aggr_stop(dev, wcid, i);

	/* sta_remove is optional */
	if (dev->drv->sta_remove)
		dev->drv->sta_remove(dev, vif, sta);

	/* flush (true == drop all) outstanding tx status entries */
	mt76_tx_status_check(dev, wcid, true);
	mt76_wcid_mask_clear(dev->wcid_mask, idx);
	mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
EXPORT_SYMBOL_GPL(__mt76_sta_remove);
1137
/* Locked wrapper around __mt76_sta_remove(): serializes station teardown
 * against other dev->mutex users.
 */
static void
mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
		struct ieee80211_sta *sta)
{
	mutex_lock(&dev->mutex);
	__mt76_sta_remove(dev, vif, sta);
	mutex_unlock(&dev->mutex);
}
1146
mt76_sta_state(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta,enum ieee80211_sta_state old_state,enum ieee80211_sta_state new_state)1147 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1148 struct ieee80211_sta *sta,
1149 enum ieee80211_sta_state old_state,
1150 enum ieee80211_sta_state new_state)
1151 {
1152 struct mt76_phy *phy = hw->priv;
1153 struct mt76_dev *dev = phy->dev;
1154 bool ext_phy = phy != &dev->phy;
1155
1156 if (old_state == IEEE80211_STA_NOTEXIST &&
1157 new_state == IEEE80211_STA_NONE)
1158 return mt76_sta_add(dev, vif, sta, ext_phy);
1159
1160 if (old_state == IEEE80211_STA_AUTH &&
1161 new_state == IEEE80211_STA_ASSOC &&
1162 dev->drv->sta_assoc)
1163 dev->drv->sta_assoc(dev, vif, sta);
1164
1165 if (old_state == IEEE80211_STA_NONE &&
1166 new_state == IEEE80211_STA_NOTEXIST)
1167 mt76_sta_remove(dev, vif, sta);
1168
1169 return 0;
1170 }
1171 EXPORT_SYMBOL_GPL(mt76_sta_state);
1172
mt76_sta_pre_rcu_remove(struct ieee80211_hw * hw,struct ieee80211_vif * vif,struct ieee80211_sta * sta)1173 void mt76_sta_pre_rcu_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1174 struct ieee80211_sta *sta)
1175 {
1176 struct mt76_phy *phy = hw->priv;
1177 struct mt76_dev *dev = phy->dev;
1178 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
1179
1180 mutex_lock(&dev->mutex);
1181 rcu_assign_pointer(dev->wcid[wcid->idx], NULL);
1182 mutex_unlock(&dev->mutex);
1183 }
1184 EXPORT_SYMBOL_GPL(mt76_sta_pre_rcu_remove);
1185
mt76_get_txpower(struct ieee80211_hw * hw,struct ieee80211_vif * vif,int * dbm)1186 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1187 int *dbm)
1188 {
1189 struct mt76_phy *phy = hw->priv;
1190 int n_chains = hweight8(phy->antenna_mask);
1191 int delta = mt76_tx_power_nss_delta(n_chains);
1192
1193 *dbm = DIV_ROUND_UP(phy->txpower_cur + delta, 2);
1194
1195 return 0;
1196 }
1197 EXPORT_SYMBOL_GPL(mt76_get_txpower);
1198
1199 static void
__mt76_csa_finish(void * priv,u8 * mac,struct ieee80211_vif * vif)1200 __mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
1201 {
1202 if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
1203 ieee80211_csa_finish(vif);
1204 }
1205
/* Finish a pending channel-switch: once mt76_csa_check() has flagged
 * completion, walk all active interfaces and finalize the CSA on each,
 * then clear the pending flag.
 */
void mt76_csa_finish(struct mt76_dev *dev)
{
	/* nothing to do until the beacon countdown has completed */
	if (!dev->csa_complete)
		return;

	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_finish, dev);

	dev->csa_complete = 0;
}
EXPORT_SYMBOL_GPL(mt76_csa_finish);
1218
1219 static void
__mt76_csa_check(void * priv,u8 * mac,struct ieee80211_vif * vif)1220 __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
1221 {
1222 struct mt76_dev *dev = priv;
1223
1224 if (!vif->csa_active)
1225 return;
1226
1227 dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
1228 }
1229
/* Poll all active interfaces for a completed channel-switch beacon
 * countdown; sets dev->csa_complete as a side effect (see
 * __mt76_csa_check()).
 */
void mt76_csa_check(struct mt76_dev *dev)
{
	ieee80211_iterate_active_interfaces_atomic(dev->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
1237
/* mac80211 set_tim callback stub: mt76 drivers rebuild the TIM element
 * when the beacon is regenerated, so nothing is needed here; always
 * reports success.
 */
int
mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
1244
/* Re-insert the 8-byte CCMP header that the hardware stripped on rx,
 * rebuilding it from the packet number saved in status->iv, so mac80211
 * can perform PN/replay handling itself.
 *
 * @skb:    received frame (802.11 header at skb->data)
 * @key_id: key index to encode in the Key ID octet
 */
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *hdr, *pn = status->iv;

	/* open an 8-byte gap between the 802.11 header and the payload */
	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	/* CCMP header layout: PN0 PN1 rsvd KeyID PN2 PN3 PN4 PN5,
	 * with status->iv holding the PN most-significant byte first
	 */
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV set + key index */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	/* the IV is present again, so clear the stripped flag */
	status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
1267
/* Translate a hardware rate index into the corresponding offset in
 * @sband->bitrates.
 *
 * @dev:   mt76 device (used to identify which band table @sband is)
 * @sband: band whose bitrate table to search
 * @idx:   hardware rate value
 * @cck:   true when the rate was reported as CCK
 *
 * Returns the matching bitrate index, or 0 when no entry matches (or a
 * CCK rate is reported on the 5 GHz band, where CCK does not exist).
 */
int mt76_get_rate(struct mt76_dev *dev,
		  struct ieee80211_supported_band *sband,
		  int idx, bool cck)
{
	int start = 0;
	int i;

	if (cck) {
		if (sband == &dev->phy.sband_5g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->phy.sband_2g.sband) {
		/* OFDM rates on 2.4 GHz: skip the four CCK entries */
		start = 4;
	}

	for (i = start; i < sband->n_bitrates; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);
1291
/* mac80211 sw_scan_start callback: mark the phy as scanning so other
 * paths can skip work that conflicts with an active software scan.
 */
void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		  const u8 *mac)
{
	struct mt76_phy *phy = hw->priv;

	set_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan);
1300
/* mac80211 sw_scan_complete callback: clear the scanning flag set by
 * mt76_sw_scan().
 */
void mt76_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt76_phy *phy = hw->priv;

	clear_bit(MT76_SCANNING, &phy->state);
}
EXPORT_SYMBOL_GPL(mt76_sw_scan_complete);
1308
/* mac80211 get_antenna callback: report the configured antenna mask.
 * The hardware uses the same mask for tx and rx, so both outputs get
 * phy->antenna_mask. Always returns 0.
 */
int mt76_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mutex_lock(&dev->mutex);
	*tx_ant = *rx_ant = phy->antenna_mask;
	mutex_unlock(&dev->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_antenna);
1322
1323 struct mt76_queue *
mt76_init_queue(struct mt76_dev * dev,int qid,int idx,int n_desc,int ring_base)1324 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
1325 int ring_base)
1326 {
1327 struct mt76_queue *hwq;
1328 int err;
1329
1330 hwq = devm_kzalloc(dev->dev, sizeof(*hwq), GFP_KERNEL);
1331 if (!hwq)
1332 return ERR_PTR(-ENOMEM);
1333
1334 err = dev->queue_ops->alloc(dev, hwq, idx, n_desc, 0, ring_base);
1335 if (err < 0)
1336 return ERR_PTR(err);
1337
1338 return hwq;
1339 }
1340 EXPORT_SYMBOL_GPL(mt76_init_queue);
1341