1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3
4 #include <linux/etherdevice.h>
5 #include <linux/timekeeping.h>
6 #include "mt7915.h"
7 #include "../dma.h"
8 #include "mac.h"
9
/* Convert an RCPI field from a RX vector word to dBm (RCPI is in 0.5 dB
 * units with a -110 dBm reference, hence (v - 220) / 2).
 */
#define to_rssi(field, rxv)		((FIELD_GET(field, rxv) - 220) / 2)

/* Helpers for building radiotap HE header fields: HE_BITS() yields a
 * little-endian "known"/flag constant, HE_PREP() extracts a C-RXV field
 * and re-encodes it at the corresponding radiotap bit position.
 */
#define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
#define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
						 IEEE80211_RADIOTAP_HE_##f)
15
/* DFS radar detection parameters for the ETSI regulatory domain.
 * Array indices are the hardware radar-pattern slots; slots 5-8 are
 * short-pulse patterns, 9-12 are staggered/long-pulse patterns.
 */
static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};
29
/* DFS radar detection parameters for the FCC regulatory domain.
 * Indices 0-4 are the hardware pattern slots used for FCC test signals.
 */
static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8, 32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8, 32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6, 32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323, 343, 13, 1, 32 },
	},
};
40
/* DFS radar detection parameters for the JP (MKK) regulatory domain.
 * Slots 0-4 mirror the FCC patterns; 13-15 add the JP-specific ones.
 */
static const struct mt7915_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};
54
/* Look up the wcid a received frame should be attributed to.  Unicast
 * frames map straight to the indexed entry; group-addressed frames from
 * a known station are redirected to the per-vif broadcast/multicast wcid.
 * Called under RCU; may return NULL when no usable entry exists.
 */
static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt76_wcid *entry;
	struct mt7915_sta *msta;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	entry = rcu_dereference(dev->mt76.wcid[idx]);
	if (!entry || unicast)
		return entry;

	/* group frame: only meaningful when the entry belongs to a station
	 * with an attached vif
	 */
	if (!entry->sta)
		return NULL;

	msta = container_of(entry, struct mt7915_sta, wcid);
	return msta->vif ? &msta->vif->sta.wcid : NULL;
}
77
/* mt76 power-save notification hook.  Intentionally empty: this driver
 * needs no software action on station PS transitions (NOTE(review):
 * presumably handled by firmware/hardware — confirm against mt76 core).
 */
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
81
/* Kick a WTBL (wireless table) update for entry @idx with the operation
 * bits in @mask, then wait for the hardware to clear the busy flag.
 * Returns true on completion, false if the 5000 us poll times out.
 */
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}
90
/* Select the LMAC WTBL access window for @wcid (128 entries per group,
 * hence wcid >> 7) and return the register offset of that entry's data.
 * Note: the group select in WDUCR is global state; callers must not race.
 */
static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, 0);
}
98
/* TODO: use txfree airtime info to avoid runtime accessing in the long run */
/* Drain the station poll list and read each station's per-AC TX/RX
 * airtime counters from its WTBL entry, reporting the deltas to
 * mac80211 for airtime fairness accounting.
 */
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
	/* TID used when reporting airtime for each WMM AC */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7915_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* steal the whole pending list so the lock is held only briefly */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		/* entries may be re-queued concurrently, so re-take the
		 * lock for each node removal
		 */
		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7915_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		idx = msta->wcid.idx;
		/* airtime counters start at dword 20 of the WTBL entry */
		addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* request a HW counter reset before the 32-bit
			 * counters get close to wrapping (bit 30 set)
			 */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7915_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* HW queues use the LMAC AC ordering, remap back */
			u8 q = mt7915_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
181
/* Decode the HE RU (resource unit) allocation from the P-RXV words into
 * rx status + radiotap HE fields: RU size class and the RU's offset
 * within that class.
 */
static void
mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
				 struct ieee80211_radiotap_he *he,
				 __le32 *rxv)
{
	u32 ru_h, ru_l;
	u8 ru, offs = 0;

	/* the RU allocation index is split across two P-RXV words */
	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
	ru = (u8)(ru_l | ru_h << 4);

	status->bw = RATE_INFO_BW_HE_RU;

	/* 802.11ax RU index ranges -> nl80211 RU size + offset in class */
	switch (ru) {
	case 0 ... 36:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
		offs = ru;
		break;
	case 37 ... 52:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
		offs = ru - 37;
		break;
	case 53 ... 60:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
		offs = ru - 53;
		break;
	case 61 ... 64:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
		offs = ru - 61;
		break;
	case 65 ... 66:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
		offs = ru - 65;
		break;
	case 67:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
		break;
	case 68:
		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
		break;
	}

	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
		     le16_encode_bits(offs,
				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
}
230
/* Prepend a radiotap HE header to @skb, filled from the C-RXV words and
 * the PPDU format in @phy.  The common fields (BSS color, LTF size, TXOP,
 * doppler, ...) are set unconditionally; format-specific fields per case.
 */
static void
mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
			      struct mt76_rx_status *status,
			      __le32 *rxv, u32 phy)
{
	/* TODO: struct ieee80211_radiotap_he_mu */
	static const struct ieee80211_radiotap_he known = {
		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
			 HE_BITS(DATA1_STBC_KNOWN) |
			 HE_BITS(DATA1_CODING_KNOWN) |
			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
			 HE_BITS(DATA1_DOPPLER_KNOWN) |
			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
		.data2 = HE_BITS(DATA2_GI_KNOWN) |
			 HE_BITS(DATA2_TXBF_KNOWN) |
			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
			 HE_BITS(DATA2_TXOP_KNOWN),
	};
	struct ieee80211_radiotap_he *he = NULL;
	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;

	/* reserve room in front of the frame; mac80211 consumes it when
	 * RX_FLAG_RADIOTAP_HE is set
	 */
	he = skb_push(skb, sizeof(known));
	memcpy(he, &known, sizeof(known));

	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
		    le16_encode_bits(ltf_size,
				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);

	switch (phy) {
	case MT_PHY_TYPE_HE_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
		break;
	case MT_PHY_TYPE_HE_EXT_SU:
		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
			     HE_BITS(DATA1_UL_DL_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		break;
	case MT_PHY_TYPE_HE_MU:
		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
			     HE_BITS(DATA1_UL_DL_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN);

		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);

		mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	case MT_PHY_TYPE_HE_TB:
		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
			     HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);

		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);

		mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
		break;
	default:
		break;
	}
}
309
mt7915_mac_fill_rx(struct mt7915_dev * dev,struct sk_buff * skb)310 int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
311 {
312 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
313 struct mt76_phy *mphy = &dev->mt76.phy;
314 struct mt7915_phy *phy = &dev->phy;
315 struct ieee80211_supported_band *sband;
316 struct ieee80211_hdr *hdr;
317 __le32 *rxd = (__le32 *)skb->data;
318 __le32 *rxv = NULL;
319 u32 mode = 0;
320 u32 rxd1 = le32_to_cpu(rxd[1]);
321 u32 rxd2 = le32_to_cpu(rxd[2]);
322 u32 rxd3 = le32_to_cpu(rxd[3]);
323 bool unicast, insert_ccmp_hdr = false;
324 u8 remove_pad;
325 int i, idx;
326
327 memset(status, 0, sizeof(*status));
328
329 if (rxd1 & MT_RXD1_NORMAL_BAND_IDX) {
330 mphy = dev->mt76.phy2;
331 if (!mphy)
332 return -EINVAL;
333
334 phy = mphy->priv;
335 status->ext_phy = true;
336 }
337
338 if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
339 return -EINVAL;
340
341 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
342 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
343 status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
344
345 if (status->wcid) {
346 struct mt7915_sta *msta;
347
348 msta = container_of(status->wcid, struct mt7915_sta, wcid);
349 spin_lock_bh(&dev->sta_poll_lock);
350 if (list_empty(&msta->poll_list))
351 list_add_tail(&msta->poll_list, &dev->sta_poll_list);
352 spin_unlock_bh(&dev->sta_poll_lock);
353 }
354
355 status->freq = mphy->chandef.chan->center_freq;
356 status->band = mphy->chandef.chan->band;
357 if (status->band == NL80211_BAND_5GHZ)
358 sband = &mphy->sband_5g.sband;
359 else
360 sband = &mphy->sband_2g.sband;
361
362 if (!sband->channels)
363 return -EINVAL;
364
365 if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
366 status->flag |= RX_FLAG_FAILED_FCS_CRC;
367
368 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
369 status->flag |= RX_FLAG_MMIC_ERROR;
370
371 if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
372 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
373 status->flag |= RX_FLAG_DECRYPTED;
374 status->flag |= RX_FLAG_IV_STRIPPED;
375 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
376 }
377
378 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
379 status->flag |= RX_FLAG_AMPDU_DETAILS;
380
381 /* all subframes of an A-MPDU have the same timestamp */
382 if (phy->rx_ampdu_ts != rxd[14]) {
383 if (!++phy->ampdu_ref)
384 phy->ampdu_ref++;
385 }
386 phy->rx_ampdu_ts = rxd[14];
387
388 status->ampdu_ref = phy->ampdu_ref;
389 }
390
391 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
392
393 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
394 return -EINVAL;
395
396 rxd += 6;
397 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
398 rxd += 4;
399 if ((u8 *)rxd - skb->data >= skb->len)
400 return -EINVAL;
401 }
402
403 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
404 u8 *data = (u8 *)rxd;
405
406 if (status->flag & RX_FLAG_DECRYPTED) {
407 status->iv[0] = data[5];
408 status->iv[1] = data[4];
409 status->iv[2] = data[3];
410 status->iv[3] = data[2];
411 status->iv[4] = data[1];
412 status->iv[5] = data[0];
413
414 insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
415 }
416 rxd += 4;
417 if ((u8 *)rxd - skb->data >= skb->len)
418 return -EINVAL;
419 }
420
421 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
422 rxd += 2;
423 if ((u8 *)rxd - skb->data >= skb->len)
424 return -EINVAL;
425 }
426
427 /* RXD Group 3 - P-RXV */
428 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
429 u32 v0, v1, v2;
430
431 rxv = rxd;
432 rxd += 2;
433 if ((u8 *)rxd - skb->data >= skb->len)
434 return -EINVAL;
435
436 v0 = le32_to_cpu(rxv[0]);
437 v1 = le32_to_cpu(rxv[1]);
438 v2 = le32_to_cpu(rxv[2]);
439
440 if (v0 & MT_PRXV_HT_AD_CODE)
441 status->enc_flags |= RX_ENC_FLAG_LDPC;
442
443 status->chains = mphy->antenna_mask;
444 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
445 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
446 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
447 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
448 status->signal = status->chain_signal[0];
449
450 for (i = 1; i < hweight8(mphy->antenna_mask); i++) {
451 if (!(status->chains & BIT(i)))
452 continue;
453
454 status->signal = max(status->signal,
455 status->chain_signal[i]);
456 }
457
458 /* RXD Group 5 - C-RXV */
459 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
460 u8 stbc = FIELD_GET(MT_CRXV_HT_STBC, v2);
461 u8 gi = FIELD_GET(MT_CRXV_HT_SHORT_GI, v2);
462 bool cck = false;
463
464 rxd += 18;
465 if ((u8 *)rxd - skb->data >= skb->len)
466 return -EINVAL;
467
468 idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
469 mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
470
471 switch (mode) {
472 case MT_PHY_TYPE_CCK:
473 cck = true;
474 fallthrough;
475 case MT_PHY_TYPE_OFDM:
476 i = mt76_get_rate(&dev->mt76, sband, i, cck);
477 break;
478 case MT_PHY_TYPE_HT_GF:
479 case MT_PHY_TYPE_HT:
480 status->encoding = RX_ENC_HT;
481 if (i > 31)
482 return -EINVAL;
483 break;
484 case MT_PHY_TYPE_VHT:
485 status->nss =
486 FIELD_GET(MT_PRXV_NSTS, v0) + 1;
487 status->encoding = RX_ENC_VHT;
488 if (i > 9)
489 return -EINVAL;
490 break;
491 case MT_PHY_TYPE_HE_MU:
492 status->flag |= RX_FLAG_RADIOTAP_HE_MU;
493 fallthrough;
494 case MT_PHY_TYPE_HE_SU:
495 case MT_PHY_TYPE_HE_EXT_SU:
496 case MT_PHY_TYPE_HE_TB:
497 status->nss =
498 FIELD_GET(MT_PRXV_NSTS, v0) + 1;
499 status->encoding = RX_ENC_HE;
500 status->flag |= RX_FLAG_RADIOTAP_HE;
501 i &= GENMASK(3, 0);
502
503 if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
504 status->he_gi = gi;
505
506 status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
507 break;
508 default:
509 return -EINVAL;
510 }
511 status->rate_idx = i;
512
513 switch (FIELD_GET(MT_CRXV_FRAME_MODE, v2)) {
514 case IEEE80211_STA_RX_BW_20:
515 break;
516 case IEEE80211_STA_RX_BW_40:
517 if (mode & MT_PHY_TYPE_HE_EXT_SU &&
518 (idx & MT_PRXV_TX_ER_SU_106T)) {
519 status->bw = RATE_INFO_BW_HE_RU;
520 status->he_ru =
521 NL80211_RATE_INFO_HE_RU_ALLOC_106;
522 } else {
523 status->bw = RATE_INFO_BW_40;
524 }
525 break;
526 case IEEE80211_STA_RX_BW_80:
527 status->bw = RATE_INFO_BW_80;
528 break;
529 case IEEE80211_STA_RX_BW_160:
530 status->bw = RATE_INFO_BW_160;
531 break;
532 default:
533 return -EINVAL;
534 }
535
536 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
537 if (mode < MT_PHY_TYPE_HE_SU && gi)
538 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
539 }
540 }
541
542 skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
543
544 if (insert_ccmp_hdr) {
545 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
546
547 mt76_insert_ccmp_hdr(skb, key_id);
548 }
549
550 if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
551 mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
552
553 hdr = mt76_skb_get_hdr(skb);
554 if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
555 return 0;
556
557 status->aggr = unicast &&
558 !ieee80211_is_qos_nullfunc(hdr->frame_control);
559 status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
560 status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
561
562 return 0;
563 }
564
/* Fill the 8-dword hardware TX descriptor (TXWI) in front of a frame:
 * queue/packet format, wcid, TID, security, fixed-rate selection for
 * non-data frames and sequence-number overrides for injected frames.
 * @beacon selects the firmware beacon queue; @key enables protection.
 */
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
	u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	__le16 fc = hdr->frame_control;
	u16 tx_count = 15, seqno = 0;
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u32 val;

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	if (ext_phy && dev->mt76.phy2)
		mphy = dev->mt76.phy2;

	/* frame type/subtype in the TXD encoding (shifted FC fields) */
	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;

	/* pick the LMAC queue: beacons go via firmware, PSD and above use
	 * the high-priority ALTX queue, everything else maps by WMM set
	 */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
			mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	/* derive the TID from ADDBA requests / BAR frames so the HW uses
	 * the right block-ack session
	 */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD1_TGID;

	txwi[1] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);
	if (key) {
		/* BIP-protected group management frames are handled by SW,
		 * everything else is marked for HW protection
		 */
		if (multicast && ieee80211_is_robust_mgmt_frame(skb) &&
		    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
			val |= MT_TXD2_BIP;
			txwi[3] = 0;
		} else {
			txwi[3] = cpu_to_le32(MT_TXD3_PROTECT_FRAME);
		}
	} else {
		txwi[3] = 0;
	}
	txwi[2] = cpu_to_le32(val);

	if (!ieee80211_is_data(fc) || multicast) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE | MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7915_5G_RATE_DEFAULT;
		else
			rate = MT7915_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}

	if (!ieee80211_is_beacon(fc))
		txwi[3] |= cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
	else
		tx_count = 0x1f;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	if (wcid->amsdu)
		val |= MT_TXD7_HW_AMSDU;
	txwi[7] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	/* injected frames keep their own sequence numbers */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val |= MT_TXD3_SN_VALID |
		       FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
	}
	txwi[3] |= cpu_to_le32(val);
}
706
/* mt76 TX path hook: build the TXWI + cut-through TXP descriptor pair for
 * a frame, register the skb in the token IDR so the TX-free event can
 * find it, and rewrite tx_info so only the partial header is DMA-mapped.
 * Returns 0 on success or a negative errno (token allocation failure).
 */
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7915_txp *txp;
	int id, i, nbuf = tx_info->nbuf - 1;
	u8 *txwi = (u8 *)txwi_ptr;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	cb->wcid = wcid->idx;

	mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	/* TXP follows the TXWI; buf[0] is the descriptor itself, so the
	 * payload fragments start at buf[1]
	 */
	txp = (struct mt7915_txp *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
	}
	txp->nbuf = nbuf;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (ieee80211_is_mgmt(hdr->frame_control))
		txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;

		txp->bss_idx = mvif->idx;
	}

	/* stash the skb in the txwi cache and hand a token to the HW;
	 * mt7915_mac_tx_free() resolves it back on completion
	 */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);
	if (id < 0)
		return id;

	txp->token = cpu_to_le16(id);
	txp->rept_wds_wcid = 0xff;
	/* the real skb is reclaimed via the token, not the DMA queue entry */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
772
773 static void
mt7915_tx_check_aggr(struct ieee80211_sta * sta,__le32 * txwi)774 mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
775 {
776 struct mt7915_sta *msta;
777 u16 fc, tid;
778 u32 val;
779
780 if (!sta || !sta->ht_cap.ht_supported)
781 return;
782
783 tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
784 if (tid >= 6) /* skip VO queue */
785 return;
786
787 val = le32_to_cpu(txwi[2]);
788 fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
789 FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
790 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
791 return;
792
793 msta = (struct mt7915_sta *)sta->drv_priv;
794 if (!test_and_set_bit(tid, &msta->ampdu_state))
795 ieee80211_start_tx_ba_session(sta, tid, 0);
796 }
797
798 static inline void
mt7915_tx_status(struct ieee80211_sta * sta,struct ieee80211_hw * hw,struct ieee80211_tx_info * info,struct sk_buff * skb)799 mt7915_tx_status(struct ieee80211_sta *sta, struct ieee80211_hw *hw,
800 struct ieee80211_tx_info *info, struct sk_buff *skb)
801 {
802 struct ieee80211_tx_status status = {
803 .sta = sta,
804 .info = info,
805 };
806
807 if (skb)
808 status.skb = skb;
809
810 if (sta) {
811 struct mt7915_sta *msta;
812
813 msta = (struct mt7915_sta *)sta->drv_priv;
814 status.rate = &msta->stats.tx_rate;
815 }
816
817 /* use status_ext to report HE rate */
818 ieee80211_tx_status_ext(hw, &status);
819 }
820
/* Translate a hardware TX-free status into mac80211 flags and report the
 * completion.  @stat non-zero means the frame failed.  The skb is either
 * consumed by the status report (REQ_TX_STATUS) or freed here.
 */
static void
mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hw *hw;

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	/* wipe stale status fields before marking success/failure */
	if (stat)
		ieee80211_tx_info_clear_status(info);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;

	if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
		mt7915_tx_status(sta, hw, info, skb);
		return;
	}

	/* report the status without handing over the skb, then free it */
	if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK))
		mt7915_tx_status(sta, hw, info, NULL);

	ieee80211_free_txskb(hw, skb);
}
851
mt7915_txp_skb_unmap(struct mt76_dev * dev,struct mt76_txwi_cache * t)852 void mt7915_txp_skb_unmap(struct mt76_dev *dev,
853 struct mt76_txwi_cache *t)
854 {
855 struct mt7915_txp *txp;
856 int i;
857
858 txp = mt7915_txwi_to_txp(dev, t);
859 for (i = 0; i < txp->nbuf; i++)
860 dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
861 le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
862 }
863
/* Process a TX-free event from the hardware: for each released MSDU token,
 * unmap its buffers, run aggregation/AQL bookkeeping and report completion
 * status to mac80211.  Consumes @skb.
 */
void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt7915_tx_free *free = (struct mt7915_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
	mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);

	/*
	 * TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
	 * to the time ack is received or dropped by hw (air + hw queue time).
	 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/*
		 * 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7915_sta *msta;
			struct mt7915_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			/* a wcid-pair word occupies a slot of info[] that is
			 * not included in MSDU_CNT, so extend the loop bound
			 */
			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for stats/airtime polling */
			msta = container_of(wcid, struct mt7915_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		/* resolve the token back to the skb registered in
		 * mt7915_tx_prepare_skb()
		 */
		spin_lock_bh(&dev->token_lock);
		txwi = idr_remove(&dev->token, msdu);
		spin_unlock_bh(&dev->token_lock);

		if (!txwi)
			continue;

		mt7915_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			/* don't aggregate EAPOL frames */
			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7915_tx_check_aggr(sta, txwi_ptr);

			/* non-AQL-estimated frames: drop the pending counter,
			 * clamping at zero on races
			 */
			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7915_tx_complete_status(mdev, txwi->skb, sta, stat);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}
	dev_kfree_skb(skb);

	mt7915_mac_sta_poll(dev);
	mt76_worker_schedule(&dev->mt76.tx_worker);
}
951
/* DMA queue completion callback.  Normally frames are completed via the
 * TX-free event; this path only fires for frames without a txwi (free
 * directly) or for the error path where the queue entry still carries
 * DMA_DUMMY_DATA and the real skb must be recovered via the token IDR.
 */
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7915_dev *dev;

	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7915_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7915_txp *txp;

		txp = mt7915_txwi_to_txp(mdev, e->txwi);

		spin_lock_bh(&dev->token_lock);
		t = idr_remove(&dev->token, le16_to_cpu(txp->token));
		spin_unlock_bh(&dev->token_lock);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		/* report as success (stat 0); no HW status is available here */
		mt7915_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0);
	}
}
985
/* Reset the CCA statistics engine for @phy by toggling the statistics
 * counter enable in the PHY RX control register.
 * NOTE(review): BIT(11) | BIT(9) are undocumented here — presumably
 * re-enable bits within MT_WF_PHY_RX_CTRL1; confirm against the
 * register definitions and replace with named masks.
 */
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 reg = MT_WF_PHY_RX_CTRL1(ext_phy);

	mt7915_l2_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
	mt7915_l2_set(dev, reg, BIT(11) | BIT(9));
}
995
/* Clear the per-phy statistics: the read-clear aggregation/airtime MIB
 * counters are flushed by reading them, the software aggr_stats half for
 * this phy is zeroed, and the survey timestamp is restarted.
 */
void mt7915_mac_reset_counters(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int i;

	/* read-clear the TX aggregation counters */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
	}

	/* aggr_stats is shared between both phys: first half for phy0,
	 * second half for phy2
	 */
	if (ext_phy) {
		dev->mt76.phy2->survey_time = ktime_get_boottime();
		i = ARRAY_SIZE(dev->mt76.aggr_stats) / 2;
	} else {
		dev->mt76.phy.survey_time = ktime_get_boottime();
		i = 0;
	}
	memset(&dev->mt76.aggr_stats[i], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(ext_phy));
	mt76_rr(dev, MT_MIB_SDR36(ext_phy));
	mt76_rr(dev, MT_MIB_SDR37(ext_phy));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(ext_phy),
		 MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(ext_phy),
		 MT_WF_RMAC_MIB_RXTIME_CLR);
}
1026
/* Program MAC timing parameters (CCK/OFDM CCA + PLCP timeouts, SIFS,
 * slot time, EIFS) for @phy, extending timeouts by the configured
 * coverage class (3 us per class).  TX/RX arbitration is briefly
 * disabled while the registers are rewritten.
 */
void mt7915_mac_set_timing(struct mt7915_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	u32 val, reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	/* both bands share hardware timing state, so use the larger of
	 * the two phys' coverage classes
	 */
	if (ext_phy) {
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       coverage_class);
	} else {
		struct mt7915_phy *phy_ext = mt7915_ext_phy(dev);

		if (phy_ext)
			coverage_class = max_t(s16, phy_ext->coverage_class,
					       coverage_class);
	}
	/* hold off TX/RX while timing registers are updated */
	mt76_set(dev, MT_ARB_SCR(ext_phy),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* CF-End uses an OFDM rate with short slots or on 5 GHz, 11b CCK
	 * otherwise
	 */
	if (phy->slottime < 20 || is_5ghz)
		val = MT7915_CFEND_RATE_DEFAULT;
	else
		val = MT7915_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(ext_phy), MT_AGG_ACR_CFEND_RATE, val);
	mt76_clear(dev, MT_ARB_SCR(ext_phy),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1083
1084 /*
1085 * TODO: mib counters are read-clear and there're many HE functionalities need
1086 * such info, hence firmware prepares a task to read the fields out to a shared
1087 * structure. User should switch to use event format to avoid race condition.
1088 */
/* Read the (read-clear) MIB airtime counters for PHY @idx and accumulate
 * them into the mac80211 channel-state survey counters.
 */
static void
mt7915_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7915_dev *dev = container_of(mphy->dev, struct mt7915_dev, mt76);
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	/* TODO: state->noise */
	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	/* total RX airtime includes OBSS time; BSS RX excludes it */
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
}
1112
mt7915_update_channel(struct mt76_dev * mdev)1113 void mt7915_update_channel(struct mt76_dev *mdev)
1114 {
1115 struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
1116
1117 mt7915_phy_update_channel(&mdev->phy, 0);
1118 if (mdev->phy2)
1119 mt7915_phy_update_channel(mdev->phy2, 1);
1120
1121 /* reset obss airtime */
1122 mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
1123 if (mdev->phy2)
1124 mt76_set(dev, MT_WF_RMAC_MIB_TIME0(1),
1125 MT_WF_RMAC_MIB_RXTIME_CLR);
1126 }
1127
1128 static bool
mt7915_wait_reset_state(struct mt7915_dev * dev,u32 state)1129 mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
1130 {
1131 bool ret;
1132
1133 ret = wait_event_timeout(dev->reset_wait,
1134 (READ_ONCE(dev->reset_state) & state),
1135 MT7915_RESET_TIMEOUT);
1136
1137 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
1138 return ret;
1139 }
1140
1141 static void
mt7915_update_vif_beacon(void * priv,u8 * mac,struct ieee80211_vif * vif)1142 mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
1143 {
1144 struct ieee80211_hw *hw = priv;
1145
1146 mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
1147 }
1148
1149 static void
mt7915_update_beacons(struct mt7915_dev * dev)1150 mt7915_update_beacons(struct mt7915_dev *dev)
1151 {
1152 ieee80211_iterate_active_interfaces(dev->mt76.hw,
1153 IEEE80211_IFACE_ITER_RESUME_ALL,
1154 mt7915_update_vif_beacon, dev->mt76.hw);
1155
1156 if (!dev->mt76.phy2)
1157 return;
1158
1159 ieee80211_iterate_active_interfaces(dev->mt76.phy2->hw,
1160 IEEE80211_IFACE_ITER_RESUME_ALL,
1161 mt7915_update_vif_beacon, dev->mt76.phy2->hw);
1162 }
1163
/* Stop both WFDMA engines, drain the TX queues, reset the RX rings,
 * restore prefetch configuration and restart DMA.  Used from the
 * MCU-driven system error recovery path.
 */
static void
mt7915_dma_reset(struct mt7915_dev *dev)
{
	int i;

	/* stop WFDMA before touching the rings */
	mt76_clear(dev, MT_WFDMA0_GLO_CFG,
		   MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	mt76_clear(dev, MT_WFDMA1_GLO_CFG,
		   MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
	/* give in-flight DMA transactions time to finish */
	usleep_range(1000, 2000);

	/* flush pending TX descriptors and reset all RX rings */
	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, i, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	/* re-init prefetch settings after reset */
	mt7915_dma_prefetch(dev);

	mt76_set(dev, MT_WFDMA0_GLO_CFG,
		 MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
	mt76_set(dev, MT_WFDMA1_GLO_CFG,
		 MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
}
1190
/* system error recovery: full chip reset driven by a handshake with the
 * MCU.  Stops all traffic, performs the DMA reset once the MCU signals
 * RESET_DONE, then restarts everything and reprograms beacon templates.
 */
void mt7915_mac_reset_work(struct work_struct *work)
{
	struct mt7915_phy *phy2;
	struct mt76_phy *ext_phy;
	struct mt7915_dev *dev;

	dev = container_of(work, struct mt7915_dev, reset_work);
	ext_phy = dev->mt76.phy2;
	phy2 = ext_phy ? ext_phy->priv : NULL;

	/* only act on a firmware-requested DMA stop */
	if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
		return;

	/* stop mac80211 traffic on both PHYs */
	ieee80211_stop_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_stop_queues(ext_phy->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	cancel_delayed_work_sync(&dev->phy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mac_work);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (ext_phy)
		mt76_txq_schedule_all(ext_phy);

	/* quiesce the TX worker and every NAPI context */
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[0]);
	napi_disable(&dev->mt76.napi[1]);
	napi_disable(&dev->mt76.napi[2]);
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	/* ack the DMA stop, then run the reset handshake with the MCU */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7915_dma_reset(dev);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);

	/* restart TX worker and NAPI polling */
	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	napi_enable(&dev->mt76.napi[0]);
	napi_schedule(&dev->mt76.napi[0]);

	napi_enable(&dev->mt76.napi[1]);
	napi_schedule(&dev->mt76.napi[1]);

	napi_enable(&dev->mt76.napi[2]);
	napi_schedule(&dev->mt76.napi[2]);

	ieee80211_wake_queues(mt76_hw(dev));
	if (ext_phy)
		ieee80211_wake_queues(ext_phy->hw);

	/* signal completion and wait for the MCU to reach normal state */
	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	mutex_unlock(&dev->mt76.mutex);

	/* beacon templates need reprogramming after the reset */
	mt7915_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
				     MT7915_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(ext_phy->hw, &phy2->mac_work,
					     MT7915_WATCHDOG_TIME);
}
1271
/* Accumulate read-clear MIB counters (FCS errors, BA misses, ACK
 * failures, RTS attempts/retries) and per-size TX aggregation counts
 * into the software statistics of @phy.
 */
static void
mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	bool ext_phy = phy != &dev->phy;
	int i, aggr0, aggr1;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
					   MT_MIB_SDR3_FCS_ERR_MASK);

	/* the second half of aggr_stats belongs to the ext PHY */
	aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
	for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
		u32 val;

		val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
		mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
		mib->ack_fail_cnt +=
			FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);

		val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
		mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
		mib->rts_retries_cnt +=
			FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);

		/* each register packs two 16-bit aggregation counters */
		val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;

		val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
		dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val >> 16;
	}
}
1306
/* Query TX rate info from the MCU for every station queued on the PHY's
 * stats list.  The list is drained under sta_poll_lock, which is dropped
 * around each MCU call since those may sleep.
 */
static void
mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_sta *msta;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	/* take a private snapshot so new entries can queue meanwhile */
	list_splice_init(&phy->stats_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, stats_list);
		list_del_init(&msta->stats_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* use MT_TX_FREE_RATE to report Tx rate for further devices */
		mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1330
/* Push pending rate-control updates (supported rates / NSS / bandwidth
 * and SMPS changes) to the MCU for every station on sta_rc_list.
 * sta_poll_lock guards the list and the per-station changed bits, and is
 * released around the MCU calls since those may sleep.
 */
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7915_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7915_sta, rc_list);
		list_del_init(&msta->rc_list);
		/* consume the changed bits under the lock */
		changed = msta->stats.changed;
		msta->stats.changed = 0;
		spin_unlock_bh(&dev->sta_poll_lock);

		/* recover the mac80211 objects embedding our private data */
		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7915_mcu_add_rate_ctrl(dev, vif, sta);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7915_mcu_add_smps(dev, vif, sta);

		spin_lock_bh(&dev->sta_poll_lock);
	}

	spin_unlock_bh(&dev->sta_poll_lock);
}
1366
mt7915_mac_work(struct work_struct * work)1367 void mt7915_mac_work(struct work_struct *work)
1368 {
1369 struct mt7915_phy *phy;
1370 struct mt76_dev *mdev;
1371
1372 phy = (struct mt7915_phy *)container_of(work, struct mt7915_phy,
1373 mac_work.work);
1374 mdev = &phy->dev->mt76;
1375
1376 mutex_lock(&mdev->mutex);
1377
1378 mt76_update_survey(mdev);
1379 if (++phy->mac_work_count == 5) {
1380 phy->mac_work_count = 0;
1381
1382 mt7915_mac_update_mib_stats(phy);
1383 }
1384
1385 if (++phy->sta_work_count == 10) {
1386 phy->sta_work_count = 0;
1387 mt7915_mac_sta_stats_work(phy);
1388 };
1389
1390 mutex_unlock(&mdev->mutex);
1391
1392 ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
1393 MT7915_WATCHDOG_TIME);
1394 }
1395
mt7915_dfs_stop_radar_detector(struct mt7915_phy * phy)1396 static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
1397 {
1398 struct mt7915_dev *dev = phy->dev;
1399
1400 if (phy->rdd_state & BIT(0))
1401 mt7915_mcu_rdd_cmd(dev, RDD_STOP, 0, MT_RX_SEL0, 0);
1402 if (phy->rdd_state & BIT(1))
1403 mt7915_mcu_rdd_cmd(dev, RDD_STOP, 1, MT_RX_SEL0, 0);
1404 }
1405
mt7915_dfs_start_rdd(struct mt7915_dev * dev,int chain)1406 static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
1407 {
1408 int err;
1409
1410 err = mt7915_mcu_rdd_cmd(dev, RDD_START, chain, MT_RX_SEL0, 0);
1411 if (err < 0)
1412 return err;
1413
1414 return mt7915_mcu_rdd_cmd(dev, RDD_DET_MODE, chain, MT_RX_SEL0, 1);
1415 }
1416
mt7915_dfs_start_radar_detector(struct mt7915_phy * phy)1417 static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
1418 {
1419 struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
1420 struct mt7915_dev *dev = phy->dev;
1421 bool ext_phy = phy != &dev->phy;
1422 int err;
1423
1424 /* start CAC */
1425 err = mt7915_mcu_rdd_cmd(dev, RDD_CAC_START, ext_phy, MT_RX_SEL0, 0);
1426 if (err < 0)
1427 return err;
1428
1429 err = mt7915_dfs_start_rdd(dev, ext_phy);
1430 if (err < 0)
1431 return err;
1432
1433 phy->rdd_state |= BIT(ext_phy);
1434
1435 if (chandef->width == NL80211_CHAN_WIDTH_160 ||
1436 chandef->width == NL80211_CHAN_WIDTH_80P80) {
1437 err = mt7915_dfs_start_rdd(dev, 1);
1438 if (err < 0)
1439 return err;
1440
1441 phy->rdd_state |= BIT(1);
1442 }
1443
1444 return 0;
1445 }
1446
1447 static int
mt7915_dfs_init_radar_specs(struct mt7915_phy * phy)1448 mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
1449 {
1450 const struct mt7915_dfs_radar_spec *radar_specs;
1451 struct mt7915_dev *dev = phy->dev;
1452 int err, i;
1453
1454 switch (dev->mt76.region) {
1455 case NL80211_DFS_FCC:
1456 radar_specs = &fcc_radar_specs;
1457 err = mt7915_mcu_set_fcc5_lpn(dev, 8);
1458 if (err < 0)
1459 return err;
1460 break;
1461 case NL80211_DFS_ETSI:
1462 radar_specs = &etsi_radar_specs;
1463 break;
1464 case NL80211_DFS_JP:
1465 radar_specs = &jp_radar_specs;
1466 break;
1467 default:
1468 return -EINVAL;
1469 }
1470
1471 for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
1472 err = mt7915_mcu_set_radar_th(dev, i,
1473 &radar_specs->radar_pattern[i]);
1474 if (err < 0)
1475 return err;
1476 }
1477
1478 return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
1479 }
1480
/* (Re)configure radar detection for @phy according to the current
 * regulatory DFS region and channel state.  Caches the channel's DFS
 * state in phy->dfs_state to avoid redundant reprogramming.  Returns 0
 * on success or a negative error code.
 */
int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7915_dev *dev = phy->dev;
	bool ext_phy = phy != &dev->phy;
	int err;

	/* no DFS region configured: make sure detection is switched off */
	if (dev->mt76.region == NL80211_DFS_UNSET) {
		phy->dfs_state = -1;
		if (phy->rdd_state)
			goto stop;

		return 0;
	}

	/* don't reprogram the detector while scanning */
	if (test_bit(MT76_SCANNING, &phy->mt76->state))
		return 0;

	/* nothing to do if the channel's DFS state hasn't changed */
	if (phy->dfs_state == chandef->chan->dfs_state)
		return 0;

	err = mt7915_dfs_init_radar_specs(phy);
	if (err < 0) {
		phy->dfs_state = -1;
		goto stop;
	}

	phy->dfs_state = chandef->chan->dfs_state;

	if (chandef->chan->flags & IEEE80211_CHAN_RADAR) {
		/* channel not yet cleared: run CAC with radar detection */
		if (chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
			return mt7915_dfs_start_radar_detector(phy);

		/* channel already available: end the CAC period */
		return mt7915_mcu_rdd_cmd(dev, RDD_CAC_END, ext_phy,
					  MT_RX_SEL0, 0);
	}

stop:
	err = mt7915_mcu_rdd_cmd(dev, RDD_NORMAL_START, ext_phy,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7915_dfs_stop_radar_detector(phy);
	return 0;
}
1527