// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

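/*
 * Allocate a txwi buffer and map it for device DMA. The mt76_txwi_cache
 * bookkeeping struct lives in the same allocation, directly behind the
 * hardware txwi area.
 */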
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

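/* Pop a cached txwi entry from the free list, or return NULL if empty. */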
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

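/* Reuse a cached txwi entry if possible, fall back to a new allocation. */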
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

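/*
 * Return a txwi entry to the cache for reuse; its DMA mapping stays
 * alive until mt76_free_pending_txwi() tears it down.
 */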
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

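/* Unmap and free every txwi entry still sitting in the cache. */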
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

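/*
 * Reprogram the ring base/size registers and resync the software
 * head/tail pointers with the hardware DMA index.
 */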
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
}

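/* Mark all descriptors as done and reset the CPU and DMA indices. */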
static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	mt76_dma_sync_idx(dev, q);
}

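/*
 * Allocate the coherent descriptor ring and the shadow entry array for
 * one queue, then bring the ring into a clean state.
 */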
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma,
				      GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	mt76_dma_queue_reset(dev, q);

	return 0;
}

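/*
 * Write buffers into the ring, packing up to two buffers per descriptor.
 * The txwi/skb ownership is recorded on the last descriptor used; its
 * index is the return value.
 */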
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

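/*
 * Unmap the buffers of a completed TX descriptor and hand a copy of its
 * entry back to the caller before clearing the slot.
 */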
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

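/*
 * Notify the hardware of new descriptors. The wmb() makes sure all
 * descriptor writes are visible before the CPU index register is updated.
 */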
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	writel(q->head, &q->regs->cpu_idx);
}

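/*
 * Reclaim completed TX entries up to the hardware DMA index, or all
 * queued entries when flushing. Cached txwi entries are returned to the
 * free list unless the driver manages them itself (MT_DRV_TXWI_NO_FREE).
 */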
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

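/*
 * Detach the RX buffer at the given index: report its length and info
 * word from the descriptor and unmap it for CPU access.
 */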
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));

		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dma_dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

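/*
 * Take the buffer at the ring tail if the hardware has marked the
 * descriptor done; when flushing, the done bit is forced so every
 * pending buffer gets drained.
 */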
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

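/* Queue an skb without a txwi header, e.g. for MCU command messages. */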
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

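/*
 * Map an skb (head and all fragments) behind a driver-built txwi and add
 * it to the TX ring. On any failure the buffers are unmapped again and
 * the skb status is reported back to mac80211.
 */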
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr,
				   dev->drv->txwi_size, DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	spin_lock_bh(&dev->rx_lock);
	ieee80211_tx_status_ext(hw, &status);
	spin_unlock_bh(&dev->rx_lock);

	return ret;
}

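/*
 * Refill the RX ring with page-fragment buffers until it is full or an
 * allocation fails; returns the number of buffers added.
 */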
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

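/*
 * Drain and free all pending RX buffers, drop a partially reassembled
 * frame, and release the page-fragment cache.
 */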
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);

	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);

	if (q->rx_head) {
		dev_kfree_skb(q->rx_head);
		q->rx_head = NULL;
	}

	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

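/* Fully reinitialize an RX queue: clean out old buffers and refill. */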
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);
}

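/*
 * Attach an RX buffer as a page fragment of the frame currently being
 * reassembled in q->rx_head. Once the last fragment arrives, the skb is
 * passed up to the driver (or dropped if the frag list overflowed).
 */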
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}

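/*
 * RX processing loop: dequeue completed buffers, reassemble multi-buffer
 * frames, build skbs and hand them to the driver, then refill the ring.
 */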
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;

			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

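/*
 * Shared NAPI poll handler; the queue index is derived from the position
 * of the napi context inside dev->napi.
 */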
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

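/*
 * Set up the dummy NAPI netdevs, register a poll handler for each RX
 * queue and prime the RX rings.
 */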
static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

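/*
 * Tear down all DMA state: flush TX and MCU queues, delete NAPI
 * contexts, free RX buffers and release cached txwi entries.
 */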
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
		mt76_dma_tx_cleanup(dev, dev->phy.q_tx[i], true);
		if (dev->phy2)
			mt76_dma_tx_cleanup(dev, dev->phy2->q_tx[i], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);