• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: ISC
2 /* Copyright (C) 2020 MediaTek Inc. */
3 
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
7 #include "mt7921.h"
8 #include "../dma.h"
9 #include "mac.h"
10 #include "mcu.h"
11 
12 #define HE_BITS(f)		cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
13 #define HE_PREP(f, m, v)	le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
14 						 IEEE80211_RADIOTAP_HE_##f)
15 
/* Resolve the mt76_wcid owning an RX frame, given its WTBL index.
 *
 * Unicast frames map straight to the per-station entry.  Group-addressed
 * frames are redirected to the broadcast/multicast wcid of the station's
 * vif so they are accounted against the right interface context.
 * Returns NULL for an out-of-range index or when no usable entry exists.
 * Uses rcu_dereference(), so the caller is expected to hold the RCU read
 * lock.
 */
static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt76_wcid *entry;
	struct mt7921_sta *msta;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	entry = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !entry)
		return entry;

	if (!entry->sta)
		return NULL;

	msta = container_of(entry, struct mt7921_sta, wcid);
	if (!msta->vif)
		return NULL;

	return &msta->vif->sta.wcid;
}
38 
/* mt76 .sta_ps callback: intentionally empty for mt7921 — the driver takes
 * no action when a station toggles power-save.  NOTE(review): presumably
 * PS handling is offloaded to firmware on this chip; confirm against the
 * mt76 core's expectations for this hook. */
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
42 
mt7921_mac_wtbl_update(struct mt7921_dev * dev,int idx,u32 mask)43 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
44 {
45 	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
46 		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
47 
48 	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
49 			 0, 5000);
50 }
51 
/* Drain dev->sta_poll_list: for each queued station, read the per-AC TX/RX
 * airtime counters from its hardware WTBL entry and report the deltas to
 * mac80211 via ieee80211_sta_register_airtime().
 *
 * List membership is protected by dev->sta_poll_lock; station structures
 * are dereferenced under the RCU read lock.
 */
static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
	/* TID used when reporting airtime for each access category */
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7921_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	/* Take the whole pending list in one shot so the lock is not held
	 * while registers are read below.
	 */
	spin_lock_bh(&dev->sta_poll_lock);
	list_splice_init(&dev->sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr;
		u16 idx;

		spin_lock_bh(&dev->sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7921_sta, poll_list);
		list_del_init(&msta->poll_list);
		spin_unlock_bh(&dev->sta_poll_lock);

		/* Airtime counters live at word 20 of the LMAC WTBL entry:
		 * one TX/RX pair of 32-bit words per AC.
		 */
		idx = msta->wcid.idx;
		addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4;

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			/* Counter past half range: schedule a hardware
			 * clear so it never wraps between polls.
			 */
			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7921_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			/* Map mac80211 AC to the LMAC queue the counters
			 * were read from.
			 */
			u8 q = mt7921_lmac_mapping(dev, i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur,
						       rx_cur);
		}
	}

	rcu_read_unlock();
}
133 
134 static void
mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status * status,struct ieee80211_radiotap_he * he,__le32 * rxv)135 mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
136 				 struct ieee80211_radiotap_he *he,
137 				 __le32 *rxv)
138 {
139 	u32 ru_h, ru_l;
140 	u8 ru, offs = 0;
141 
142 	ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
143 	ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
144 	ru = (u8)(ru_l | ru_h << 4);
145 
146 	status->bw = RATE_INFO_BW_HE_RU;
147 
148 	switch (ru) {
149 	case 0 ... 36:
150 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
151 		offs = ru;
152 		break;
153 	case 37 ... 52:
154 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
155 		offs = ru - 37;
156 		break;
157 	case 53 ... 60:
158 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
159 		offs = ru - 53;
160 		break;
161 	case 61 ... 64:
162 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
163 		offs = ru - 61;
164 		break;
165 	case 65 ... 66:
166 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
167 		offs = ru - 65;
168 		break;
169 	case 67:
170 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
171 		break;
172 	case 68:
173 		status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
174 		break;
175 	}
176 
177 	he->data1 |= HE_BITS(DATA1_BW_RU_ALLOC_KNOWN);
178 	he->data2 |= HE_BITS(DATA2_RU_OFFSET_KNOWN) |
179 		     le16_encode_bits(offs,
180 				      IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
181 }
182 
183 static void
mt7921_mac_decode_he_mu_radiotap(struct sk_buff * skb,struct mt76_rx_status * status,__le32 * rxv)184 mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb,
185 				 struct mt76_rx_status *status,
186 				 __le32 *rxv)
187 {
188 	static const struct ieee80211_radiotap_he_mu mu_known = {
189 		.flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
190 			  HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
191 			  HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
192 			  HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN) |
193 			  HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN),
194 		.flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN) |
195 			  HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
196 	};
197 	struct ieee80211_radiotap_he_mu *he_mu = NULL;
198 
199 	he_mu = skb_push(skb, sizeof(mu_known));
200 	memcpy(he_mu, &mu_known, sizeof(mu_known));
201 
202 #define MU_PREP(f, v)	le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
203 
204 	he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
205 	if (status->he_dcm)
206 		he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
207 
208 	he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
209 			 MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
210 				 le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
211 
212 	he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, cpu_to_le32(rxv[3]));
213 
214 	if (status->bw >= RATE_INFO_BW_40) {
215 		he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
216 		he_mu->ru_ch2[0] =
217 			FIELD_GET(MT_CRXV_HE_RU1, cpu_to_le32(rxv[3]));
218 	}
219 
220 	if (status->bw >= RATE_INFO_BW_80) {
221 		he_mu->ru_ch1[1] =
222 			FIELD_GET(MT_CRXV_HE_RU2, cpu_to_le32(rxv[3]));
223 		he_mu->ru_ch2[1] =
224 			FIELD_GET(MT_CRXV_HE_RU3, cpu_to_le32(rxv[3]));
225 	}
226 }
227 
228 static void
mt7921_mac_decode_he_radiotap(struct sk_buff * skb,struct mt76_rx_status * status,__le32 * rxv,u32 phy)229 mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
230 			      struct mt76_rx_status *status,
231 			      __le32 *rxv, u32 phy)
232 {
233 	static const struct ieee80211_radiotap_he known = {
234 		.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
235 			 HE_BITS(DATA1_DATA_DCM_KNOWN) |
236 			 HE_BITS(DATA1_STBC_KNOWN) |
237 			 HE_BITS(DATA1_CODING_KNOWN) |
238 			 HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
239 			 HE_BITS(DATA1_DOPPLER_KNOWN) |
240 			 HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
241 			 HE_BITS(DATA1_BSS_COLOR_KNOWN),
242 		.data2 = HE_BITS(DATA2_GI_KNOWN) |
243 			 HE_BITS(DATA2_TXBF_KNOWN) |
244 			 HE_BITS(DATA2_PE_DISAMBIG_KNOWN) |
245 			 HE_BITS(DATA2_TXOP_KNOWN),
246 	};
247 	struct ieee80211_radiotap_he *he = NULL;
248 	u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
249 
250 	he = skb_push(skb, sizeof(known));
251 	memcpy(he, &known, sizeof(known));
252 
253 	he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
254 		    HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
255 	he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
256 	he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
257 		    le16_encode_bits(ltf_size,
258 				     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
259 	if (cpu_to_le32(rxv[0]) & MT_PRXV_TXBF)
260 		he->data5 |= HE_BITS(DATA5_TXBF);
261 	he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
262 		    HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
263 
264 	switch (phy) {
265 	case MT_PHY_TYPE_HE_SU:
266 		he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
267 			     HE_BITS(DATA1_UL_DL_KNOWN) |
268 			     HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
269 
270 		he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
271 			     HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
272 		he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
273 		break;
274 	case MT_PHY_TYPE_HE_EXT_SU:
275 		he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
276 			     HE_BITS(DATA1_UL_DL_KNOWN);
277 
278 		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
279 		break;
280 	case MT_PHY_TYPE_HE_MU:
281 		he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
282 			     HE_BITS(DATA1_UL_DL_KNOWN);
283 
284 		he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
285 		he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
286 
287 		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
288 		break;
289 	case MT_PHY_TYPE_HE_TB:
290 		he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
291 			     HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
292 			     HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
293 			     HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
294 
295 		he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
296 			     HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
297 			     HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
298 			     HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
299 
300 		mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
301 		break;
302 	default:
303 		break;
304 	}
305 }
306 
307 static void
mt7921_get_status_freq_info(struct mt7921_dev * dev,struct mt76_phy * mphy,struct mt76_rx_status * status,u8 chfreq)308 mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
309 			    struct mt76_rx_status *status, u8 chfreq)
310 {
311 	if (!test_bit(MT76_HW_SCANNING, &mphy->state) &&
312 	    !test_bit(MT76_HW_SCHED_SCANNING, &mphy->state) &&
313 	    !test_bit(MT76_STATE_ROC, &mphy->state)) {
314 		status->freq = mphy->chandef.chan->center_freq;
315 		status->band = mphy->chandef.chan->band;
316 		return;
317 	}
318 
319 	status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
320 	status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
321 }
322 
323 static void
mt7921_mac_rssi_iter(void * priv,u8 * mac,struct ieee80211_vif * vif)324 mt7921_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
325 {
326 	struct sk_buff *skb = priv;
327 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
328 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
329 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
330 
331 	if (status->signal > 0)
332 		return;
333 
334 	if (!ether_addr_equal(vif->addr, hdr->addr1))
335 		return;
336 
337 	ewma_rssi_add(&mvif->rssi, -status->signal);
338 }
339 
340 static void
mt7921_mac_assoc_rssi(struct mt7921_dev * dev,struct sk_buff * skb)341 mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
342 {
343 	struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
344 
345 	if (!ieee80211_is_assoc_resp(hdr->frame_control) &&
346 	    !ieee80211_is_auth(hdr->frame_control))
347 		return;
348 
349 	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
350 		IEEE80211_IFACE_ITER_RESUME_ALL,
351 		mt7921_mac_rssi_iter, skb);
352 }
353 
mt7921_mac_fill_rx(struct mt7921_dev * dev,struct sk_buff * skb)354 int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
355 {
356 	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
357 	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
358 	bool hdr_trans, unicast, insert_ccmp_hdr = false;
359 	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
360 	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
361 	struct mt76_phy *mphy = &dev->mt76.phy;
362 	struct mt7921_phy *phy = &dev->phy;
363 	struct ieee80211_supported_band *sband;
364 	struct ieee80211_hdr *hdr;
365 	u32 rxd0 = le32_to_cpu(rxd[0]);
366 	u32 rxd1 = le32_to_cpu(rxd[1]);
367 	u32 rxd2 = le32_to_cpu(rxd[2]);
368 	u32 rxd3 = le32_to_cpu(rxd[3]);
369 	u32 rxd4 = le32_to_cpu(rxd[4]);
370 	u16 seq_ctrl = 0;
371 	__le16 fc = 0;
372 	u32 mode = 0;
373 	int i, idx;
374 
375 	memset(status, 0, sizeof(*status));
376 
377 	if (rxd1 & MT_RXD1_NORMAL_BAND_IDX)
378 		return -EINVAL;
379 
380 	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
381 		return -EINVAL;
382 
383 	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
384 		return -EINVAL;
385 
386 	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
387 	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
388 		return -EINVAL;
389 
390 	/* ICV error or CCMP/BIP/WPI MIC error */
391 	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
392 		status->flag |= RX_FLAG_ONLY_MONITOR;
393 
394 	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
395 	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
396 	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
397 	status->wcid = mt7921_rx_get_wcid(dev, idx, unicast);
398 
399 	if (status->wcid) {
400 		struct mt7921_sta *msta;
401 
402 		msta = container_of(status->wcid, struct mt7921_sta, wcid);
403 		spin_lock_bh(&dev->sta_poll_lock);
404 		if (list_empty(&msta->poll_list))
405 			list_add_tail(&msta->poll_list, &dev->sta_poll_list);
406 		spin_unlock_bh(&dev->sta_poll_lock);
407 	}
408 
409 	mt7921_get_status_freq_info(dev, mphy, status, chfreq);
410 
411 	if (status->band == NL80211_BAND_5GHZ)
412 		sband = &mphy->sband_5g.sband;
413 	else
414 		sband = &mphy->sband_2g.sband;
415 
416 	if (!sband->channels)
417 		return -EINVAL;
418 
419 	if ((rxd0 & csum_mask) == csum_mask)
420 		skb->ip_summed = CHECKSUM_UNNECESSARY;
421 
422 	if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
423 		status->flag |= RX_FLAG_FAILED_FCS_CRC;
424 
425 	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
426 		status->flag |= RX_FLAG_MMIC_ERROR;
427 
428 	if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
429 	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
430 		status->flag |= RX_FLAG_DECRYPTED;
431 		status->flag |= RX_FLAG_IV_STRIPPED;
432 		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
433 	}
434 
435 	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
436 
437 	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
438 		return -EINVAL;
439 
440 	rxd += 6;
441 	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
442 		u32 v0 = le32_to_cpu(rxd[0]);
443 		u32 v2 = le32_to_cpu(rxd[2]);
444 
445 		fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
446 		seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
447 		qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
448 
449 		rxd += 4;
450 		if ((u8 *)rxd - skb->data >= skb->len)
451 			return -EINVAL;
452 	}
453 
454 	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
455 		u8 *data = (u8 *)rxd;
456 
457 		if (status->flag & RX_FLAG_DECRYPTED) {
458 			switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
459 			case MT_CIPHER_AES_CCMP:
460 			case MT_CIPHER_CCMP_CCX:
461 			case MT_CIPHER_CCMP_256:
462 				insert_ccmp_hdr =
463 					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
464 				fallthrough;
465 			case MT_CIPHER_TKIP:
466 			case MT_CIPHER_TKIP_NO_MIC:
467 			case MT_CIPHER_GCMP:
468 			case MT_CIPHER_GCMP_256:
469 				status->iv[0] = data[5];
470 				status->iv[1] = data[4];
471 				status->iv[2] = data[3];
472 				status->iv[3] = data[2];
473 				status->iv[4] = data[1];
474 				status->iv[5] = data[0];
475 				break;
476 			default:
477 				break;
478 			}
479 		}
480 		rxd += 4;
481 		if ((u8 *)rxd - skb->data >= skb->len)
482 			return -EINVAL;
483 	}
484 
485 	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
486 		status->timestamp = le32_to_cpu(rxd[0]);
487 		status->flag |= RX_FLAG_MACTIME_START;
488 
489 		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
490 			status->flag |= RX_FLAG_AMPDU_DETAILS;
491 
492 			/* all subframes of an A-MPDU have the same timestamp */
493 			if (phy->rx_ampdu_ts != status->timestamp) {
494 				if (!++phy->ampdu_ref)
495 					phy->ampdu_ref++;
496 			}
497 			phy->rx_ampdu_ts = status->timestamp;
498 
499 			status->ampdu_ref = phy->ampdu_ref;
500 		}
501 
502 		rxd += 2;
503 		if ((u8 *)rxd - skb->data >= skb->len)
504 			return -EINVAL;
505 	}
506 
507 	/* RXD Group 3 - P-RXV */
508 	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
509 		u8 stbc, gi;
510 		u32 v0, v1;
511 		bool cck;
512 
513 		rxv = rxd;
514 		rxd += 2;
515 		if ((u8 *)rxd - skb->data >= skb->len)
516 			return -EINVAL;
517 
518 		v0 = le32_to_cpu(rxv[0]);
519 		v1 = le32_to_cpu(rxv[1]);
520 
521 		if (v0 & MT_PRXV_HT_AD_CODE)
522 			status->enc_flags |= RX_ENC_FLAG_LDPC;
523 
524 		status->chains = mphy->antenna_mask;
525 		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
526 		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
527 		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
528 		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
529 		status->signal = -128;
530 		for (i = 0; i < hweight8(mphy->antenna_mask); i++) {
531 			if (!(status->chains & BIT(i)) ||
532 			    status->chain_signal[i] >= 0)
533 				continue;
534 
535 			status->signal = max(status->signal,
536 					     status->chain_signal[i]);
537 		}
538 
539 		if (status->signal == -128)
540 			status->flag |= RX_FLAG_NO_SIGNAL_VAL;
541 
542 		stbc = FIELD_GET(MT_PRXV_STBC, v0);
543 		gi = FIELD_GET(MT_PRXV_SGI, v0);
544 		cck = false;
545 
546 		idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
547 		mode = FIELD_GET(MT_PRXV_TX_MODE, v0);
548 
549 		switch (mode) {
550 		case MT_PHY_TYPE_CCK:
551 			cck = true;
552 			fallthrough;
553 		case MT_PHY_TYPE_OFDM:
554 			i = mt76_get_rate(&dev->mt76, sband, i, cck);
555 			break;
556 		case MT_PHY_TYPE_HT_GF:
557 		case MT_PHY_TYPE_HT:
558 			status->encoding = RX_ENC_HT;
559 			if (i > 31)
560 				return -EINVAL;
561 			break;
562 		case MT_PHY_TYPE_VHT:
563 			status->nss =
564 				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
565 			status->encoding = RX_ENC_VHT;
566 			if (i > 11)
567 				return -EINVAL;
568 			break;
569 		case MT_PHY_TYPE_HE_MU:
570 			status->flag |= RX_FLAG_RADIOTAP_HE_MU;
571 			fallthrough;
572 		case MT_PHY_TYPE_HE_SU:
573 		case MT_PHY_TYPE_HE_EXT_SU:
574 		case MT_PHY_TYPE_HE_TB:
575 			status->nss =
576 				FIELD_GET(MT_PRXV_NSTS, v0) + 1;
577 			status->encoding = RX_ENC_HE;
578 			status->flag |= RX_FLAG_RADIOTAP_HE;
579 			i &= GENMASK(3, 0);
580 
581 			if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
582 				status->he_gi = gi;
583 
584 			status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
585 			break;
586 		default:
587 			return -EINVAL;
588 		}
589 
590 		status->rate_idx = i;
591 
592 		switch (FIELD_GET(MT_PRXV_FRAME_MODE, v0)) {
593 		case IEEE80211_STA_RX_BW_20:
594 			break;
595 		case IEEE80211_STA_RX_BW_40:
596 			if (mode & MT_PHY_TYPE_HE_EXT_SU &&
597 			    (idx & MT_PRXV_TX_ER_SU_106T)) {
598 				status->bw = RATE_INFO_BW_HE_RU;
599 				status->he_ru =
600 					NL80211_RATE_INFO_HE_RU_ALLOC_106;
601 			} else {
602 				status->bw = RATE_INFO_BW_40;
603 			}
604 			break;
605 		case IEEE80211_STA_RX_BW_80:
606 			status->bw = RATE_INFO_BW_80;
607 			break;
608 		case IEEE80211_STA_RX_BW_160:
609 			status->bw = RATE_INFO_BW_160;
610 			break;
611 		default:
612 			return -EINVAL;
613 		}
614 
615 		status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
616 		if (mode < MT_PHY_TYPE_HE_SU && gi)
617 			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
618 
619 		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
620 			rxd += 18;
621 			if ((u8 *)rxd - skb->data >= skb->len)
622 				return -EINVAL;
623 		}
624 	}
625 
626 	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
627 
628 	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
629 	status->amsdu = !!amsdu_info;
630 	if (status->amsdu) {
631 		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
632 		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
633 		if (!hdr_trans) {
634 			memmove(skb->data + 2, skb->data,
635 				ieee80211_get_hdrlen_from_skb(skb));
636 			skb_pull(skb, 2);
637 		}
638 	}
639 
640 	if (!hdr_trans) {
641 		if (insert_ccmp_hdr) {
642 			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
643 
644 			mt76_insert_ccmp_hdr(skb, key_id);
645 		}
646 
647 		hdr = mt76_skb_get_hdr(skb);
648 		fc = hdr->frame_control;
649 		if (ieee80211_is_data_qos(fc)) {
650 			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
651 			qos_ctl = *ieee80211_get_qos_ctl(hdr);
652 		}
653 	} else {
654 		status->flag &= ~(RX_FLAG_RADIOTAP_HE |
655 				  RX_FLAG_RADIOTAP_HE_MU);
656 		status->flag |= RX_FLAG_8023;
657 	}
658 
659 	mt7921_mac_assoc_rssi(dev, skb);
660 
661 	if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) {
662 		mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
663 
664 		if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
665 			mt7921_mac_decode_he_mu_radiotap(skb, status, rxv);
666 	}
667 
668 	if (!status->wcid || !ieee80211_is_data_qos(fc))
669 		return 0;
670 
671 	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
672 	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
673 	status->qos_ctl = qos_ctl;
674 
675 	return 0;
676 }
677 
678 static void
mt7921_mac_write_txwi_8023(struct mt7921_dev * dev,__le32 * txwi,struct sk_buff * skb,struct mt76_wcid * wcid)679 mt7921_mac_write_txwi_8023(struct mt7921_dev *dev, __le32 *txwi,
680 			   struct sk_buff *skb, struct mt76_wcid *wcid)
681 {
682 	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
683 	u8 fc_type, fc_stype;
684 	u16 ethertype;
685 	bool wmm = false;
686 	u32 val;
687 
688 	if (wcid->sta) {
689 		struct ieee80211_sta *sta;
690 
691 		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
692 		wmm = sta->wme;
693 	}
694 
695 	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
696 	      FIELD_PREP(MT_TXD1_TID, tid);
697 
698 	ethertype = get_unaligned_be16(&skb->data[12]);
699 	if (ethertype >= ETH_P_802_3_MIN)
700 		val |= MT_TXD1_ETH_802_3;
701 
702 	txwi[1] |= cpu_to_le32(val);
703 
704 	fc_type = IEEE80211_FTYPE_DATA >> 2;
705 	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
706 
707 	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
708 	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
709 
710 	txwi[2] |= cpu_to_le32(val);
711 
712 	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
713 	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
714 	txwi[7] |= cpu_to_le32(val);
715 }
716 
/* Fill the 802.11-specific fields of the TX descriptor for a frame sent
 * in native wifi format: header format/length/TID (word 1), frame
 * type/subtype, multicast and fixed-rate bits (word 2), protection and
 * sequence-number bits (word 3), and the type/subtype copy in word 7.
 */
static void
mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	/* For ADDBA requests, flag the descriptor and take the TID from the
	 * BlockAck parameter set; for BAR frames, from the BAR control field.
	 */
	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);

		txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
		tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
	} else if (ieee80211_is_back_req(hdr->frame_control)) {
		struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
		u16 control = le16_to_cpu(bar->control);

		tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
	}

	/* MT_TXD1_HDR_INFO is the 802.11 header length in 16-bit words */
	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);
	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
	      FIELD_PREP(MT_TXD2_MULTICAST, multicast);

	/* BIP-protected multicast robust mgmt frames: mark as BIP and let
	 * the BIP engine rather than the pairwise key protect the frame.
	 */
	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD2_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	/* Non-data and group-addressed frames go out at a fixed rate */
	if (!ieee80211_is_data(fc) || multicast)
		val |= MT_TXD2_FIX_RATE;

	txwi[2] |= cpu_to_le32(val);

	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	/* Injected (monitor) frames keep their caller-supplied sequence
	 * number instead of the hardware-assigned one.
	 */
	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
	txwi[7] |= cpu_to_le32(val);
}
792 
/* Periodically request a TX status report from the MCU for data frames of
 * this wcid (rate-limited to one request every 250 ms per station) by
 * stamping a packet ID into TXD word 5.
 */
static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
{
	struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
	u32 ftype, pid;

	ftype = FIELD_GET(MT_TXD2_FRAME_TYPE, le32_to_cpu(txwi[2]));
	if (!(ftype & (IEEE80211_FTYPE_DATA >> 2)) ||
	    time_is_after_eq_jiffies(msta->next_txs_ts))
		return;

	msta->next_txs_ts = jiffies + msecs_to_jiffies(250);

	pid = mt76_get_next_pkt_id(wcid);
	txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
			       FIELD_PREP(MT_TXD5_PID, pid));
}
810 
/* Build the complete TX descriptor (TXWI) for @skb: words 0/1/3 are set
 * here (queue, length, wlan/omac index, protection and ack policy), the
 * remaining words are initialized and then filled by the 802.3 or 802.11
 * specific helper depending on whether the frame uses hardware header
 * translation.  Fixed-rate frames additionally get a default rate in
 * word 6, and a periodic TX-status request may be stamped in word 5.
 */
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, bool beacon)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_phy *mphy = &dev->mphy;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 tx_count = 15;
	u32 val;

	if (vif) {
		struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;

		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
	}

	/* Select packet format and hardware queue: beacons go out via the
	 * firmware path, PSD/management via the ALTX queue, everything else
	 * via the vif's WMM queue set.
	 */
	if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
			mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;
	txwi[5] = 0;
	txwi[6] = 0;
	txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;

	if (is_8023)
		mt7921_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7921_mac_write_txwi_80211(dev, txwi, skb, key);

	/* Helper above may have requested a fixed rate (mgmt/multicast):
	 * pick the band default and disable BA for those frames.
	 */
	if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
		u16 rate;

		/* hardware won't add HTC for mgmt/ctrl frame */
		txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			rate = MT7921_5G_RATE_DEFAULT;
		else
			rate = MT7921_2G_RATE_DEFAULT;

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_TX_RATE, rate);
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}

	mt7921_update_txs(wcid, txwi);
}
890 
891 static void
mt7921_write_hw_txp(struct mt7921_dev * dev,struct mt76_tx_info * tx_info,void * txp_ptr,u32 id)892 mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
893 		    void *txp_ptr, u32 id)
894 {
895 	struct mt7921_hw_txp *txp = txp_ptr;
896 	struct mt7921_txp_ptr *ptr = &txp->ptr[0];
897 	int i, nbuf = tx_info->nbuf - 1;
898 
899 	tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
900 	tx_info->nbuf = 1;
901 
902 	txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
903 
904 	for (i = 0; i < nbuf; i++) {
905 		u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
906 		u32 addr = tx_info->buf[i + 1].addr;
907 
908 		if (i == nbuf - 1)
909 			len |= MT_TXD_LEN_LAST;
910 
911 		if (i & 1) {
912 			ptr->buf1 = cpu_to_le32(addr);
913 			ptr->len1 = cpu_to_le16(len);
914 			ptr++;
915 		} else {
916 			ptr->buf0 = cpu_to_le32(addr);
917 			ptr->len0 = cpu_to_le16(len);
918 		}
919 	}
920 }
921 
/* mt76 .tx_prepare_skb hook: reserve a token for the frame, write the TX
 * descriptor and the hardware pointer block in front of the payload, and
 * hand the buffer layout back to the DMA core.  Returns 0 on success or a
 * negative errno (invalid length, token pool exhausted).
 */
int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
	struct mt76_txwi_cache *t;
	struct mt7921_txp_common *txp;
	int id;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	cb->wcid = wcid->idx;

	/* txwi cache entry lives right behind the descriptor; record the
	 * skb so TX-completion can find and free it via the token id.
	 */
	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      false);

	txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
	memset(txp, 0, sizeof(struct mt7921_txp_common));
	mt7921_write_hw_txp(dev, tx_info, txp, id);

	/* real skb ownership moved to the token; give DMA a placeholder */
	tx_info->skb = DMA_DUMMY_DATA;

	return 0;
}
962 
963 static void
mt7921_tx_check_aggr(struct ieee80211_sta * sta,__le32 * txwi)964 mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
965 {
966 	struct mt7921_sta *msta;
967 	u16 fc, tid;
968 	u32 val;
969 
970 	if (!sta || !sta->ht_cap.ht_supported)
971 		return;
972 
973 	tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
974 	if (tid >= 6) /* skip VO queue */
975 		return;
976 
977 	val = le32_to_cpu(txwi[2]);
978 	fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
979 	     FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
980 	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
981 		return;
982 
983 	msta = (struct mt7921_sta *)sta->drv_priv;
984 	if (!test_and_set_bit(tid, &msta->ampdu_state))
985 		ieee80211_start_tx_ba_session(sta, tid, 0);
986 }
987 
/* Build an ieee80211_tx_status for a completed frame and report it to
 * mac80211.
 *
 * @stat: non-zero means the hw reported a failed transmission; the tx info
 *	  status area is cleared in that case before the ACK flag is set.
 * @free_list: optional list the skb gets queued on for batched release
 *	       (used by the tx-free event path); may be NULL.
 */
static void
mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
			  struct ieee80211_sta *sta, u8 stat,
			  struct list_head *free_list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,
		.free_list = free_list,
	};
	struct ieee80211_hw *hw;

	if (sta) {
		struct mt7921_sta *msta;

		/* report the station's cached tx rate */
		msta = (struct mt7921_sta *)sta->drv_priv;
		status.rate = &msta->stats.tx_rate;
	}

	hw = mt76_tx_status_get_hw(mdev, skb);

	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		info->flags |= IEEE80211_TX_STAT_AMPDU;

	/* on failure wipe the status area first, then (below) still mark
	 * ACK unless the frame was sent with no-ack policy
	 */
	if (stat)
		ieee80211_tx_info_clear_status(info);

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.tx_time = 0;
	ieee80211_tx_status_ext(hw, &status);
}
1023 
mt7921_txp_skb_unmap(struct mt76_dev * dev,struct mt76_txwi_cache * t)1024 void mt7921_txp_skb_unmap(struct mt76_dev *dev,
1025 			  struct mt76_txwi_cache *t)
1026 {
1027 	struct mt7921_txp_common *txp;
1028 	int i;
1029 
1030 	txp = mt7921_txwi_to_txp(dev, t);
1031 
1032 	for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
1033 		struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
1034 		bool last;
1035 		u16 len;
1036 
1037 		len = le16_to_cpu(ptr->len0);
1038 		last = len & MT_TXD_LEN_LAST;
1039 		len &= MT_TXD_LEN_MASK;
1040 		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
1041 				 DMA_TO_DEVICE);
1042 		if (last)
1043 			break;
1044 
1045 		len = le16_to_cpu(ptr->len1);
1046 		last = len & MT_TXD_LEN_LAST;
1047 		len &= MT_TXD_LEN_MASK;
1048 		dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
1049 				 DMA_TO_DEVICE);
1050 		if (last)
1051 			break;
1052 	}
1053 }
1054 
/* Handle a TX_FREE event from the hw: release the token and buffers of
 * every reported MSDU, report tx status to mac80211 and schedule station
 * polling and the tx worker.  Runs in napi context (rcu read side held).
 */
void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
{
	struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *tmp;
	bool wake = false;
	u8 i, count;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
	 * to the time ack is received or dropped by hw (air + hw queue time).
	 * Should avoid accessing WTBL to get Tx airtime, and use it instead.
	 */
	count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
	for (i = 0; i < count; i++) {
		u32 msdu, info = le32_to_cpu(free->info[i]);
		u8 stat;

		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		if (info & MT_TX_FREE_PAIR) {
			struct mt7921_sta *msta;
			struct mt7921_phy *phy;
			struct mt76_wcid *wcid;
			u16 idx;

			/* a wcid-pair entry carries no msdu id, so the event
			 * holds one extra entry beyond MSDU_CNT; extend the
			 * loop bound accordingly
			 */
			count++;
			idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			/* sta stays set for the following msdu entries */
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			/* queue the station for stats update and polling */
			msta = container_of(wcid, struct mt7921_sta, wcid);
			phy = msta->vif->phy;
			spin_lock_bh(&dev->sta_poll_lock);
			if (list_empty(&msta->stats_list))
				list_add_tail(&msta->stats_list, &phy->stats_list);
			if (list_empty(&msta->poll_list))
				list_add_tail(&msta->poll_list, &dev->sta_poll_list);
			spin_unlock_bh(&dev->sta_poll_lock);
			continue;
		}

		msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
		stat = FIELD_GET(MT_TX_FREE_STATUS, info);

		/* look the frame up via the token allocated at tx time */
		txwi = mt76_token_release(mdev, msdu, &wake);
		if (!txwi)
			continue;

		mt7921_txp_skb_unmap(mdev, txwi);
		if (txwi->skb) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
			void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);

			/* never start a BA session off an EAPOL frame */
			if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
				mt7921_tx_check_aggr(sta, txwi_ptr);

			/* frames without an airtime estimate were counted as
			 * non-AQL; undo that accounting, clamping at zero
			 */
			if (sta && !info->tx_time_est) {
				struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
				int pending;

				pending = atomic_dec_return(&wcid->non_aql_packets);
				if (pending < 0)
					atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
			}

			mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
			txwi->skb = NULL;
		}

		mt76_put_txwi(mdev, txwi);
	}

	/* unblock tx if releasing tokens freed up space */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	napi_consume_skb(skb, 1);

	/* release skbs batched on free_list by mt7921_tx_complete_status */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}

	mt7921_mac_sta_poll(dev);
	mt76_worker_schedule(&dev->mt76.tx_worker);
}
1150 
/* Per-queue-entry tx completion hook: recover the real skb (stashed behind
 * the tx token by mt7921_tx_prepare_skb) and report its status to mac80211.
 */
void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
	struct mt7921_dev *dev;

	/* no txwi: nothing to look up, just drop the skb */
	if (!e->txwi) {
		dev_kfree_skb_any(e->skb);
		return;
	}

	dev = container_of(mdev, struct mt7921_dev, mt76);

	/* error path */
	if (e->skb == DMA_DUMMY_DATA) {
		struct mt76_txwi_cache *t;
		struct mt7921_txp_common *txp;
		u16 token;

		/* read the token id back out of the hw descriptor and
		 * reclaim the real skb from the token table
		 */
		txp = mt7921_txwi_to_txp(mdev, e->txwi);
		token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
		t = mt76_token_put(mdev, token);
		e->skb = t ? t->skb : NULL;
	}

	if (e->skb) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);

		/* stat 0: frame is reported as acked unless NO_ACK was set */
		mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
					  NULL);
	}
}
1184 
/* Clear all hw tx/rx statistics for @phy and restart survey accounting. */
void mt7921_mac_reset_counters(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	int i;

	/* read and discard the tx aggregation counters (hw counters are
	 * assumed read-clear here, matching the accumulation in
	 * mt7921_mac_update_mib_stats)
	 */
	for (i = 0; i < 4; i++) {
		mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
	}

	dev->mt76.phy.survey_time = ktime_get_boottime();
	/* only the first half of aggr_stats belongs to this phy */
	memset(&dev->mt76.aggr_stats[0], 0, sizeof(dev->mt76.aggr_stats) / 2);

	/* reset airtime counters */
	mt76_rr(dev, MT_MIB_SDR9(0));
	mt76_rr(dev, MT_MIB_SDR36(0));
	mt76_rr(dev, MT_MIB_SDR37(0));

	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);
}
1206 
/* Program MAC timing parameters (PLCP/CCA timeouts, SIFS, slot time,
 * CF-End rate) for the current channel, adjusted by the configured
 * coverage class.  No-op unless the phy is running.
 */
void mt7921_mac_set_timing(struct mt7921_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7921_dev *dev = phy->dev;
	u32 val, reg_offset;
	/* base CCK and OFDM PLCP/CCA timeout values */
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	int sifs, offset;
	bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	/* standard SIFS: 16us on 5 GHz, 10us on 2.4 GHz */
	if (is_5ghz)
		sifs = 16;
	else
		sifs = 10;

	/* pause the tx/rx arbiter while the timing registers change */
	mt76_set(dev, MT_ARB_SCR(0),
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	/* extend the timeouts by 3 units per coverage class step */
	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(0), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(0), ofdm + reg_offset);
	mt76_wr(dev, MT_TMAC_ICR0(0),
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, phy->slottime));

	/* 11b CF-End rate only for long-slot operation on 2.4 GHz */
	if (phy->slottime < 20 || is_5ghz)
		val = MT7921_CFEND_RATE_DEFAULT;
	else
		val = MT7921_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_ACR0(0), MT_AGG_ACR_CFEND_RATE, val);
	/* resume the arbiter */
	mt76_clear(dev, MT_ARB_SCR(0),
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
1252 
1253 static u8
mt7921_phy_get_nf(struct mt7921_phy * phy,int idx)1254 mt7921_phy_get_nf(struct mt7921_phy *phy, int idx)
1255 {
1256 	return 0;
1257 }
1258 
/* Fold the hw airtime MIB counters for phy @idx into the channel survey
 * state and update the smoothed noise estimate.
 */
static void
mt7921_phy_update_channel(struct mt76_phy *mphy, int idx)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);
	struct mt7921_phy *phy = (struct mt7921_phy *)mphy->priv;
	struct mt76_channel_state *state;
	u64 busy_time, tx_time, rx_time, obss_time;
	int nf;

	busy_time = mt76_get_field(dev, MT_MIB_SDR9(idx),
				   MT_MIB_SDR9_BUSY_MASK);
	tx_time = mt76_get_field(dev, MT_MIB_SDR36(idx),
				 MT_MIB_SDR36_TXTIME_MASK);
	rx_time = mt76_get_field(dev, MT_MIB_SDR37(idx),
				 MT_MIB_SDR37_RXTIME_MASK);
	obss_time = mt76_get_field(dev, MT_WF_RMAC_MIB_AIRTIME14(idx),
				   MT_MIB_OBSSTIME_MASK);

	/* running average of the noise floor, kept scaled by 16; nf is
	 * currently always 0 (mt7921_phy_get_nf is a stub), so phy->noise
	 * stays at its initial value
	 */
	nf = mt7921_phy_get_nf(phy, idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	/* accumulate airtime into the survey state; obss time counts as
	 * generic rx but not bss rx
	 */
	state = mphy->chan_state;
	state->cc_busy += busy_time;
	state->cc_tx += tx_time;
	state->cc_rx += rx_time + obss_time;
	state->cc_bss_rx += rx_time;
	state->noise = -(phy->noise >> 4);
}
1290 
/* Survey hook: refresh the channel state, skipping the update entirely if
 * the device could not be woken from runtime pm.
 */
void mt7921_update_channel(struct mt76_phy *mphy)
{
	struct mt7921_dev *dev = container_of(mphy->dev, struct mt7921_dev, mt76);

	if (mt76_connac_pm_wake(mphy, &dev->pm))
		return;

	mt7921_phy_update_channel(mphy, 0);
	/* reset obss airtime */
	mt76_set(dev, MT_WF_RMAC_MIB_TIME0(0), MT_WF_RMAC_MIB_RXTIME_CLR);

	/* let the device go back to sleep */
	mt76_connac_power_save_sched(mphy, &dev->pm);
}
1304 
/* Release every outstanding tx token (used on device reset): unmap the DMA
 * buffers, hand unfinished skbs back to mac80211 and destroy the token idr.
 */
void mt7921_tx_token_put(struct mt7921_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7921_txp_skb_unmap(&dev->mt76, txwi);
		if (txwi->skb) {
			struct ieee80211_hw *hw;

			/* frame never completed; free it via mac80211 */
			hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
			ieee80211_free_txskb(hw, txwi->skb);
		}
		mt76_put_txwi(&dev->mt76, txwi);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
1325 
1326 static void
mt7921_vif_connect_iter(void * priv,u8 * mac,struct ieee80211_vif * vif)1327 mt7921_vif_connect_iter(void *priv, u8 *mac,
1328 			struct ieee80211_vif *vif)
1329 {
1330 	struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
1331 	struct mt7921_dev *dev = mvif->phy->dev;
1332 
1333 	if (vif->type == NL80211_IFTYPE_STATION)
1334 		ieee80211_disconnect(vif, true);
1335 
1336 	mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
1337 	mt7921_mcu_set_tx(dev, vif);
1338 }
1339 
/* Perform one full chip reset attempt: quiesce dma/napi, drop all pending
 * tx state, reset the WPDMA, reload the firmware and restart the mac.
 * Returns 0 on success.  Caller holds dev->mt76.mutex
 * (see mt7921_mac_reset_work).
 */
static int
mt7921_mac_reset(struct mt7921_dev *dev)
{
	int i, err;

	mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);

	/* mask all interrupts while dma and firmware are reset */
	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	/* release anyone blocked waiting on an mcu response */
	wake_up(&dev->mt76.mcu.wait);
	skb_queue_purge(&dev->mt76.mcu.res_q);

	mt76_txq_schedule_all(&dev->mphy);

	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
	napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
	napi_disable(&dev->mt76.tx_napi);

	/* drop all in-flight tx frames and restart token allocation */
	mt7921_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7921_wpdma_reset(dev, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);

	/* re-enable interrupts before talking to the firmware again */
	mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
		MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
		MT_INT_MCU_CMD);
	mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);

	err = mt7921_run_firmware(dev);
	if (err)
		goto out;

	err = mt7921_mcu_set_eeprom(dev);
	if (err)
		goto out;

	err = mt7921_mac_init(dev);
	if (err)
		goto out;

	err = __mt7921_start(&dev->phy);
out:
	/* tx path is restarted even when a step above failed, so the
	 * caller can retry the reset
	 */
	clear_bit(MT76_RESET, &dev->mphy.state);

	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	mt76_worker_enable(&dev->mt76.tx_worker);

	return err;
}
1402 
/* system error recovery: stop all activity, retry a full chip reset up to
 * ten times, then restore mac80211 state (abort scans, reconnect vifs).
 */
void mt7921_mac_reset_work(struct work_struct *work)
{
	struct mt7921_dev *dev = container_of(work, struct mt7921_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i;

	dev_err(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	/* quiesce all deferred work that could touch the hw mid-reset */
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		/* make sure the driver owns the device before resetting */
		__mt7921_mcu_drv_pmctrl(dev);

		if (!mt7921_mac_reset(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	/* abort any scan that was in flight when the chip died */
	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	ieee80211_wake_queues(hw);
	/* rebuild per-vif fw state; station vifs are forced to reconnect */
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7921_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);
}
1447 
mt7921_reset(struct mt76_dev * mdev)1448 void mt7921_reset(struct mt76_dev *mdev)
1449 {
1450 	struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
1451 
1452 	if (!dev->hw_init_done)
1453 		return;
1454 
1455 	if (dev->hw_full_reset)
1456 		return;
1457 
1458 	queue_work(dev->mt76.wq, &dev->reset_work);
1459 }
1460 
/* Accumulate the hw MIB counters into the sw mib stats and update the tx
 * aggregation size histogram: aggr_stats[0..7] come from MT_TX_AGG_CNT,
 * [8..15] from MT_TX_AGG_CNT2, two 16-bit buckets per register.
 * Called periodically from mt7921_mac_work.
 */
static void
mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
	struct mt7921_dev *dev = phy->dev;
	struct mib_stats *mib = &phy->mib;
	int i, aggr0 = 0, aggr1;

	mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
					   MT_MIB_SDR3_FCS_ERR_MASK);
	mib->ack_fail_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR3(0),
					    MT_MIB_ACK_FAIL_COUNT_MASK);
	mib->ba_miss_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR2(0),
					   MT_MIB_BA_FAIL_COUNT_MASK);
	mib->rts_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR0(0),
				       MT_MIB_RTS_COUNT_MASK);
	mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
					       MT_MIB_RTS_FAIL_COUNT_MASK);

	for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
		u32 val, val2;

		val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
		val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));

		/* each register packs two 16-bit histogram buckets */
		dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
		dev->mt76.aggr_stats[aggr0++] += val >> 16;
		dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
		dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
	}
}
1491 
mt7921_mac_work(struct work_struct * work)1492 void mt7921_mac_work(struct work_struct *work)
1493 {
1494 	struct mt7921_phy *phy;
1495 	struct mt76_phy *mphy;
1496 
1497 	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
1498 					       mac_work.work);
1499 	phy = mphy->priv;
1500 
1501 	mt7921_mutex_acquire(phy->dev);
1502 
1503 	mt76_update_survey(mphy);
1504 	if (++mphy->mac_work_count == 2) {
1505 		mphy->mac_work_count = 0;
1506 
1507 		mt7921_mac_update_mib_stats(phy);
1508 	}
1509 
1510 	mt7921_mutex_release(phy->dev);
1511 	ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
1512 				     MT7921_WATCHDOG_TIME);
1513 }
1514 
/* Runtime-pm wake worker: once the firmware hands device control back to
 * the driver, restart rx napi, flush frames queued while asleep and re-arm
 * the mac watchdog; always unblock tx queues and pm waiters at the end.
 */
void mt7921_pm_wake_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.wake_work);
	mphy = dev->phy.mt76;

	/* 0 return: driver ownership successfully reclaimed */
	if (!mt7921_mcu_drv_pmctrl(dev)) {
		int i;

		mt76_for_each_q_rx(&dev->mt76, i)
			napi_schedule(&dev->mt76.napi[i]);
		/* requeue frames that were buffered while the device slept */
		mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
		mt7921_tx_cleanup(dev);
		if (test_bit(MT76_STATE_RUNNING, &mphy->state))
			ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
						     MT7921_WATCHDOG_TIME);
	}

	ieee80211_wake_queues(mphy->hw);
	wake_up(&dev->pm.wait);
}
1539 
/* Runtime-pm idle worker: hand the device over to the firmware once it has
 * been idle for pm.idle_timeout; otherwise reschedule for the remaining
 * idle time.
 */
void mt7921_pm_power_save_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	unsigned long delta;
	struct mt76_phy *mphy;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						pm.ps_work.work);
	mphy = dev->phy.mt76;

	delta = dev->pm.idle_timeout;
	/* never put the device to sleep while a scan is running */
	if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
	    test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
		goto out;

	if (mutex_is_locked(&dev->mt76.mutex))
		/* if mt76 mutex is held we should not put the device
		 * to sleep since we are currently accessing device
		 * register map. We need to wait for the next power_save
		 * trigger.
		 */
		goto out;

	/* still within the idle window; re-arm for the time left */
	if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
		delta = dev->pm.last_activity + delta - jiffies;
		goto out;
	}

	/* device handed to fw; stop the watchdog until the next wake */
	if (!mt7921_mcu_fw_pmctrl(dev)) {
		cancel_delayed_work_sync(&mphy->mac_work);
		return;
	}
out:
	queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
1575 
/* Collect the firmware coredump fragments queued on coredump.msg_list into
 * a single buffer, hand it to the devcoredump framework and trigger a chip
 * reset.
 */
void mt7921_coredump_work(struct work_struct *work)
{
	struct mt7921_dev *dev;
	char *dump, *data;

	dev = (struct mt7921_dev *)container_of(work, struct mt7921_dev,
						coredump.work.work);

	/* fragments are still arriving; check back later */
	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	/* allocation may fail; the loop below then only drains the queue */
	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		/* strip the mcu rx header, keep only the dump payload */
		skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
		/* drop fragments that would overflow the dump buffer */
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	/* devcoredump takes ownership of (and frees) the buffer */
	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt7921_reset(&dev->mt76);
}
1622