// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
        const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
        unsigned int hdrlen;

        if (unlikely(len < 10))
                return 0;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        if (unlikely(hdrlen > len))
                return 0;
        return hdrlen;
}

static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
                        void *data, u32 seg_len, u32 truesize, struct page *p)
{
        struct sk_buff *skb;
        u32 true_len, hdr_len = 0, copy, frag;

        skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
        if (!true_len || true_len > seg_len)
                goto bad_frame;

        hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
        if (!hdr_len)
                goto bad_frame;

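        /* With MT_RXINFO_L2PAD the hardware inserts two bytes of padding
         * after the 802.11 header so that the payload is 4-byte aligned.
         * Copy the header on its own first, then skip the pad bytes.
         */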
        if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
                skb_put_data(skb, data, hdr_len);

                data += hdr_len + 2;
                true_len -= hdr_len;
                hdr_len = 0;
        }

        /* If not doing paged RX, the allocated skb will always have
         * enough space.
         */
        copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
        frag = true_len - copy;

        skb_put_data(skb, data, copy);
        data += copy;

        if (frag) {
                skb_add_rx_frag(skb, 0, p, data - page_address(p),
                                frag, truesize);
                get_page(p);
        }

        return skb;

bad_frame:
        dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
                            true_len, hdr_len);
        dev_kfree_skb(skb);
        return NULL;
}

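/* Process one DMA segment: strip the MT_DMA_HDR_LEN header and the RXWI
 * descriptor from the front, the FCE info word from the back, and hand
 * the remaining 802.11 frame to mac80211. @p is the page backing @data
 * when paged RX is used, NULL otherwise.
 */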
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
                                   u32 seg_len, struct page *p)
{
        struct sk_buff *skb;
        struct mt7601u_rxwi *rxwi;
        u32 fce_info, truesize = seg_len;

        /* The DMA_INFO field at the beginning of the segment contains only
         * some of the information; we need to read the FCE descriptor from
         * the end.
         */
        fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
        seg_len -= MT_FCE_INFO_LEN;

        data += MT_DMA_HDR_LEN;
        seg_len -= MT_DMA_HDR_LEN;

        rxwi = (struct mt7601u_rxwi *) data;
        data += sizeof(struct mt7601u_rxwi);
        seg_len -= sizeof(struct mt7601u_rxwi);

        if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
                dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
        if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
                dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

        trace_mt_rx(dev, rxwi, fce_info);

        skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
        if (!skb)
                return;

        spin_lock(&dev->mac_lock);
        ieee80211_rx(dev->hw, skb);
        spin_unlock(&dev->mac_lock);
}

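/* Return the total length of the next segment in the buffer, DMA headers
 * included, or 0 when the remaining data cannot hold a valid segment,
 * which terminates the caller's loop.
 */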
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
        u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
                sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
        u16 dma_len = get_unaligned_le16(data);

        if (data_len < min_seg_len ||
            WARN_ON_ONCE(!dma_len) ||
            WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
            WARN_ON_ONCE(dma_len & 0x3))
                return 0;

        return MT_DMA_HDRS + dma_len;
}

static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
        u32 seg_len, data_len = e->urb->actual_length;
        u8 *data = page_address(e->p);
        struct page *new_p = NULL;
        int cnt = 0;

        if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
                return;

        /* Copy if there is very little data in the buffer. */
        if (data_len > 512)
                new_p = dev_alloc_pages(MT_RX_ORDER);

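        /* With a fresh page available, frames are passed up as fragments of
         * the old page (see mt7601u_rx_skb_from_seg()) and the queue entry
         * keeps the new page; with small buffers everything is copied into
         * the skb and the original page stays with the entry.
         */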
        while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
                mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

                data_len -= seg_len;
                data += seg_len;
                cnt++;
        }

        if (cnt > 1)
                trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

        if (new_p) {
                /* we have one extra ref from the allocator */
                put_page(e->p);
                e->p = new_p;
        }
}

static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
        struct mt7601u_rx_queue *q = &dev->rx_q;
        struct mt7601u_dma_buf_rx *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);

        if (!q->pending)
                goto out;

        buf = &q->e[q->start];
        q->pending--;
        q->start = (q->start + 1) % q->entries;
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);

        return buf;
}

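/* URB completion callback, runs in atomic context: only account the
 * completed entry and kick the RX tasklet; the actual processing happens
 * in mt7601u_rx_tasklet().
 */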
static void mt7601u_complete_rx(struct urb *urb)
{
        struct mt7601u_dev *dev = urb->context;
        struct mt7601u_rx_queue *q = &dev->rx_q;
        unsigned long flags;

        /* do not schedule the rx tasklet if the urb has been unlinked
         * or the device has been removed
         */
        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
                goto out;

        q->end = (q->end + 1) % q->entries;
        q->pending++;
        tasklet_schedule(&dev->rx_tasklet);
out:
        spin_unlock_irqrestore(&dev->rx_lock, flags);
}

static void mt7601u_rx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
        struct mt7601u_dma_buf_rx *e;

        while ((e = mt7601u_rx_get_pending_entry(dev))) {
                if (e->urb->status)
                        continue;

                mt7601u_rx_process_entry(dev, e);
                mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
        }
}

static void mt7601u_complete_tx(struct urb *urb)
{
        struct mt7601u_tx_queue *q = urb->context;
        struct mt7601u_dev *dev = q->dev;
        struct sk_buff *skb;
        unsigned long flags;

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
                                    urb->status);
                fallthrough;
        case 0:
                break;
        }

        spin_lock_irqsave(&dev->tx_lock, flags);
        if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
                goto out;

        skb = q->e[q->start].skb;
        q->e[q->start].skb = NULL;
        trace_mt_tx_dma_done(dev, skb);

        __skb_queue_tail(&dev->tx_skb_done, skb);
        tasklet_schedule(&dev->tx_tasklet);

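        /* The queue was stopped in mt7601u_dma_submit_tx() when it filled
         * up; wake it once usage drops back to 7/8 of capacity.
         */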
        if (q->used == q->entries - q->entries / 8)
                ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

        q->start = (q->start + 1) % q->entries;
        q->used--;
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);
}

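/* Collect the skbs queued by mt7601u_complete_tx() under the lock, then
 * report their TX status to mac80211 with the lock dropped, and schedule
 * a delayed read of the hardware TX statistics.
 */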
static void mt7601u_tx_tasklet(unsigned long data)
{
        struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
        struct sk_buff_head skbs;
        unsigned long flags;

        __skb_queue_head_init(&skbs);

        spin_lock_irqsave(&dev->tx_lock, flags);

        set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
        if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
                queue_delayed_work(dev->stat_wq, &dev->stat_work,
                                   msecs_to_jiffies(10));

        skb_queue_splice_init(&dev->tx_skb_done, &skbs);

        spin_unlock_irqrestore(&dev->tx_lock, flags);

        while (!skb_queue_empty(&skbs)) {
                struct sk_buff *skb = __skb_dequeue(&skbs);

                mt7601u_tx_status(dev, skb);
        }
}

static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
                                 struct sk_buff *skb, u8 ep)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
        struct mt7601u_dma_buf_tx *e;
        struct mt7601u_tx_queue *q = &dev->tx_q[ep];
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dev->tx_lock, flags);

        if (WARN_ON(q->entries <= q->used)) {
                ret = -ENOSPC;
                goto out;
        }

        e = &q->e[q->end];
        usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
                          mt7601u_complete_tx, q);
        ret = usb_submit_urb(e->urb, GFP_ATOMIC);
        if (ret) {
                /* Special-handle ENODEV from TX urb submission because it
                 * will often be the first ENODEV we see after the device
                 * is removed.
                 */
                if (ret == -ENODEV)
                        set_bit(MT7601U_STATE_REMOVED, &dev->state);
                else
                        dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
                                ret);
                goto out;
        }

        q->end = (q->end + 1) % q->entries;
        q->used++;
        e->skb = skb;

        if (q->used >= q->entries)
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
        spin_unlock_irqrestore(&dev->tx_lock, flags);

        return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
        /* TODO: take management packets to queue 5 */
        return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
        if (ep == 5)
                return MT_QSEL_MGMT;
        return MT_QSEL_EDCA;
}

int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
                           struct mt76_wcid *wcid, int hw_q)
{
        u8 ep = q2ep(hw_q);
        u32 dma_flags;
        int ret;

        dma_flags = MT_TXD_PKT_INFO_80211;
        if (wcid->hw_key_idx == 0xff)
                dma_flags |= MT_TXD_PKT_INFO_WIV;

        ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
        if (ret)
                return ret;

        ret = mt7601u_dma_submit_tx(dev, skb, ep);
        if (ret) {
                ieee80211_free_txskb(dev->hw, skb);
                return ret;
        }

        return 0;
}

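/* usb_poison_urb() both kills any URB that is in flight and makes further
 * submissions fail, so the RX tasklet cannot resubmit a buffer after this
 * point.
 */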
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++)
                usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
                                 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
        struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
        u8 *buf = page_address(e->p);
        unsigned pipe;
        int ret;

        pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

        usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
                          mt7601u_complete_rx, dev);

        trace_mt_submit_urb(dev, e->urb);
        ret = usb_submit_urb(e->urb, gfp);
        if (ret)
                dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

        return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
        int i, ret;

        for (i = 0; i < dev->rx_q.entries; i++) {
                ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
                if (ret)
                        return ret;
        }

        return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
        int i;

        for (i = 0; i < dev->rx_q.entries; i++) {
                __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
                usb_free_urb(dev->rx_q.e[i].urb);
        }
}

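/* Note: allocation failures are not unwound here; the caller,
 * mt7601u_dma_init(), handles them by calling mt7601u_dma_cleanup().
 */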
static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
        int i;

        memset(&dev->rx_q, 0, sizeof(dev->rx_q));
        dev->rx_q.dev = dev;
        dev->rx_q.entries = N_RX_ENTRIES;

        for (i = 0; i < N_RX_ENTRIES; i++) {
                dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

                if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
                        return -ENOMEM;
        }

        return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
        int i;

        for (i = 0; i < q->entries; i++) {
                usb_poison_urb(q->e[i].urb);
                if (q->e[i].skb)
                        mt7601u_tx_status(q->dev, q->e[i].skb);
                usb_free_urb(q->e[i].urb);
        }
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
        int i;

        if (!dev->tx_q)
                return;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
                                  struct mt7601u_tx_queue *q)
{
        int i;

        q->dev = dev;
        q->entries = N_TX_ENTRIES;

        for (i = 0; i < N_TX_ENTRIES; i++) {
                q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!q->e[i].urb)
                        return -ENOMEM;
        }

        return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
        int i;

        dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
                                 sizeof(*dev->tx_q), GFP_KERNEL);
        if (!dev->tx_q)
                return -ENOMEM;

        for (i = 0; i < __MT_EP_OUT_MAX; i++)
                if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
                        return -ENOMEM;

        return 0;
}

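/* Bring up the DMA paths: initialize the TX/RX tasklets, allocate the
 * per-endpoint TX queues and the RX queue, then submit all RX URBs.
 * Any failure is unwound through mt7601u_dma_cleanup().
 */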
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
        int ret = -ENOMEM;

        tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
        tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

        ret = mt7601u_alloc_tx(dev);
        if (ret)
                goto err;
        ret = mt7601u_alloc_rx(dev);
        if (ret)
                goto err;

        ret = mt7601u_submit_rx(dev);
        if (ret)
                goto err;

        return 0;
err:
        mt7601u_dma_cleanup(dev);
        return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
        mt7601u_kill_rx(dev);

        tasklet_kill(&dev->rx_tasklet);

        mt7601u_free_rx(dev);
        mt7601u_free_tx(dev);

        tasklet_kill(&dev->tx_tasklet);
}