// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

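/* RCPI is reported in 0.5 dB steps with a 110 dBm offset,
 * i.e. rssi = rcpi / 2 - 110
 */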
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7996_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7996_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

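/* Select the WTBL group that contains @wcid (128 entries per group), then
 * return the register offset of dword @dw within that entry's LMAC WTBL.
 */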
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7996_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

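			/* ask for a counter clear once the hw values get
			 * close to wrapping (bit 30 set)
			 */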
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
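	/* rebuild addr3/addr4 from the 802.3 header according to the
	 * ToDS/FromDS combination of the original frame
	 */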
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode == MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

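	/* the fixed part of the RXD is 8 dwords; optional groups follow */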
	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
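				/* the PN is stored byte-reversed in the
				 * RXD; flip it into status->iv
				 */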
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
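				/* keep ampdu_ref non-zero across wraparound */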
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so here needs to clear amsdu present bit
			 * to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

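	/* synthesize the 802.11 frame type/subtype for hw-encap frames:
	 * QoS data if the station is WMM capable, plain data otherwise
	 */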
	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(sc);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

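	/* pre-build a minimal TXWI + FW TXP pair; the payload starts right
	 * after the TXP in the same DMA buffer
	 */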
	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct mt7996_sta *msta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

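		/* each dword can carry two 15-bit MSDU IDs; an all-ones ID
		 * marks an unused slot
		 */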
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

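	/* with STBC the hw reports space-time streams; halve to get the
	 * spatial stream count
	 */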
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

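	/* each coverage class step extends the CCA/PLCP timeouts by 3us */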
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

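/* estimate the noise floor from the per-antenna IPI histograms as a
 * weighted average over the per-bin power levels (in -dBm)
 */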
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
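	/* track the noise floor as a moving average in 4-bit fixed point:
	 * new = old + (nf - old) / 16
	 */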
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "%s L1 SER recovery start\n",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}
	local_bh_enable();

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "%s L1 SER recovery completed\n",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

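/*
 * Entry point for firmware-triggered recovery: a WM/WA watchdog exception
 * schedules the coredump work (which queues the reset work once the dump
 * has been submitted), while any other recovery event queues the reset
 * work directly.
 */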
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

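/*
 * Accumulate the per-band hardware MIB counters into the per-phy software
 * statistics; each register is read once per poll and added to the
 * running totals.
 */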
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

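/*
 * Deferred rate-control updates: drain dev->sta_rc_list and push the
 * changed rate/NSS/bandwidth or SMPS settings for each station to the
 * firmware. The poll lock is dropped around the MCU calls, which may
 * sleep.
 */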
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

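/*
 * Periodic per-phy housekeeping, rescheduled every MT7996_WATCHDOG_TIME:
 * update the channel survey on every run and refresh the MIB and
 * per-station statistics on every fifth run.
 */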
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

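/* stop any radar detector instance this phy started (chain 0 and/or 1) */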
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}

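/*
 * Start the radar detector (RDD) on the given chain. The region code
 * passed to the MCU follows the switch below: 0 for ETSI, 2 for JP, and
 * 1 for FCC and any other region.
 */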
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}

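/*
 * Begin CAC on the band and start the RDD; 160 MHz and 80+80 MHz
 * channels additionally start a second detector instance on chain 1.
 */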
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}

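/*
 * Program the region-specific radar patterns and pulse thresholds into
 * the MCU; the FCC region additionally configures the FCC5 LPN
 * threshold.
 */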
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

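/*
 * Reconcile the phy's DFS state with the current channel: (re)load the
 * radar specs and start CAC when detection becomes necessary, signal
 * RDD_CAC_END once CAC has completed, and fall back to RDD_NORMAL_START
 * when detection is no longer required.
 */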
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

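/*
 * The TWT wake duration field is expressed in units of 256 us; shifting
 * left by 8 converts it to microseconds, the unit of the TSF.
 */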
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

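/*
 * Insert the flow into the TSF-ordered TWT schedule list, searching for
 * the first gap between already scheduled flows that can hold the new
 * service period. Returns the start TSF chosen for the flow (0 when it
 * is placed at the head of the list).
 */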
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

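/*
 * Validate a TWT setup request: only individual, implicit agreements
 * with 256 us duration units are supported, and the wake interval
 * (mantissa << exp) must cover at least the requested wake duration.
 */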
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

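/*
 * Check whether the station already has an active flow with identical
 * TWT parameters, so duplicate agreements are not installed.
 */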
static bool
mt7996_mac_twt_param_equal(struct mt7996_sta *msta,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

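/*
 * Handle a TWT setup request from a station: validate the request, pick
 * a free flow id and table slot, compute the first service period start
 * relative to the current TSF for REQUEST/SUGGEST commands, and install
 * the agreement in the firmware. The setup command field of the response
 * is rewritten with ACCEPT, DICTATE or REJECT accordingly.
 */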
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta, twt_agrt))
		goto unlock;

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control &= IEEE80211_TWT_CONTROL_RX_DISABLED;
}

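/*
 * Tear down a single TWT flow: remove the agreement from the firmware
 * and release the flow id and table slot. Caller must hold
 * dev->mt76.mutex.
 */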
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}