// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

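/* Track the starting sequence number for a later BlockAck request: for QoS
 * data frames on an aggregation-enabled TID, record the frame's sequence
 * number plus one (0x10 in the shifted seq_ctrl encoding).
 */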
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

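/* Begin a tx status processing section: initialize the caller-provided list
 * of completed frames and take the status_list lock.
 */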
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

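/* End a tx status processing section: drop the status_list lock, then report
 * each completed frame on the caller's list to mac80211.
 */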
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid)
			status.sta = wcid_to_sta(wcid);

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

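/* Mark a tracked frame with the given completion flags. Once both DMA and tx
 * status completion have been seen, unlink it from the status list; if the
 * tx status report failed, fall back to treating the frame as ACKed.
 */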
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

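/* Register a frame for tx status tracking: assign a packet id, record it in
 * the skb control block and append the frame to the status list. Returns the
 * packet id, or MT_PACKET_ID_NO_ACK / MT_PACKET_ID_NO_SKB when no status
 * tracking is needed.
 */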
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return MT_PACKET_ID_NO_SKB;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	pid = mt76_get_next_pkt_id(wcid);
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

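/* Look up a tracked frame by wcid and packet id. While walking the status
 * list, frames that have waited longer than MT_TX_STATUS_SKB_TIMEOUT are
 * completed as failed and moved to the caller's list.
 */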
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
					      MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

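/* Frames without an airtime estimate bypass AQL; a per-wcid count of them is
 * kept and decremented here on completion, clamped at zero.
 */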
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

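/* Common tx completion path: update the non-AQL counter, hand testmode frames
 * back to the testmode code, and report the status to mac80211. Frames linked
 * on the status list (skb->prev set) are only marked DMA-done here and are
 * reported later from mt76_tx_status_unlock().
 */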
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (!skb->prev) {
		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

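/* Queue a frame on the hardware queue and, for non-AQL frames of a known
 * station, bump the pending counter; signal *stop once the per-station limit
 * (MT_MAX_NON_AQL_PKT) is reached.
 */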
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

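/* Transmit a single frame directly (not via the mac80211 txq scheduler):
 * pick the target hardware queue, fill in rate info if needed, queue the
 * frame and kick the hardware.
 */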
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);
	bool ext_phy = phy != &dev->phy;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
		qid = MT_TXQ_PSD;
		skb_set_queue_mapping(skb, qid);
	}

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	q = phy->q_tx[qid];

	spin_lock_bh(&q->lock);
	__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
	dev->queue_ops->kick(dev, q);
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	bool ext_phy = phy != &phy->dev->phy;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	if (ext_phy)
		info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

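/* mac80211 callback for powersave service periods: dequeue up to nframes
 * buffered frames for the requested TIDs, mark the last one EOSP and kick the
 * PSD queue; signal EOSP directly if nothing was queued.
 */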
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

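/* Pull a burst of frames from one txq and queue them on the hardware queue,
 * stopping on reset, on a full/stopped queue, or once the non-AQL packet
 * limit is hit. Returns the number of queued frames or a negative error.
 */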
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	dev->queue_ops->kick(dev, q);

	return n_frames;
}

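/* One scheduling round for a hardware queue: pull txqs from mac80211, send a
 * pending BlockAck request if needed, and push a burst of frames from each
 * txq until the hardware queue fills up or no txqs remain.
 */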
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		if (test_bit(MT76_RESET, &phy->state))
			return -EBUSY;

		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		spin_lock_bh(&q->lock);

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&q->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&q->lock);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		spin_unlock_bh(&q->lock);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	mt76_txq_schedule_all(&dev->phy);
	if (dev->phy2)
		mt76_txq_schedule_all(dev->phy2);

#ifdef CONFIG_NL80211_TESTMODE
	if (dev->phy.test.tx_pending)
		mt76_testmode_tx_pending(&dev->phy);
	if (dev->phy2 && dev->phy2->test.tx_pending)
		mt76_testmode_tx_pending(dev->phy2);
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	if (!test_bit(MT76_STATE_RUNNING, &phy->state))
		return;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

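/* Pad the tail of a (possibly fragmented) skb by 'pad' bytes, updating the
 * total length on the head skb when the data lives in fragment skbs.
 */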
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* The first packet of an A-MSDU burst keeps track of the whole burst
	 * length, so the length of it and the last packet need to be updated.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2;
	struct mt76_queue *q, *q2 = NULL;

	q = phy->q_tx[0];
	if (blocked == q->blocked)
		return;

	q->blocked = blocked;
	if (phy2) {
		q2 = phy2->q_tx[0];
		q2->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

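/* Allocate a tx token for a txwi buffer; block further tx scheduling when the
 * number of outstanding tokens approaches the driver's limit.
 */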
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size,
			  GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

	if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi)
		dev->token_count--;

	if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);