/*
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY	10
#define MT_VEND_REQ_TOUT_MS	300

/* should be called with usb_ctrl_mtx locked */
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
				  u8 req_type, u16 val, u16 offset,
				  void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;
	int i, ret;

	pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
				       : usb_sndctrlpipe(udev, 0);
	for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
		if (test_bit(MT76_REMOVED, &dev->state))
			return -EIO;

		ret = usb_control_msg(udev, pipe, req, req_type, val,
				      offset, buf, len, MT_VEND_REQ_TOUT_MS);
		if (ret == -ENODEV)
			set_bit(MT76_REMOVED, &dev->state);
		if (ret >= 0 || ret == -ENODEV)
			return ret;
		usleep_range(5000, 10000);
	}

	dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
		req, offset, ret);
	return ret;
}

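/* acquire usb_ctrl_mtx, issue the vendor request and trace the access */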
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
			 u8 req_type, u16 val, u16 offset,
			 void *buf, size_t len)
{
	int ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_vendor_request(dev, req, req_type,
				     val, offset, buf, len);
	trace_usb_reg_wr(dev, offset, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

/* should be called with usb_ctrl_mtx locked */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;
	u16 offset;
	int ret;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}

u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	u32 ret;

	mutex_lock(&dev->usb.usb_ctrl_mtx);
	ret = __mt76u_rr(dev, addr);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return ret;
}

/* should be called with usb_ctrl_mtx locked */
static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	struct mt76_usb *usb = &dev->usb;
	u16 offset;
	u8 req;

	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_WRITE_CFG;
		break;
	default:
		req = MT_VEND_MULTI_WRITE;
		break;
	}
	offset = addr & ~MT_VEND_TYPE_MASK;

	put_unaligned_le32(val, usb->data);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR, 0,
			       offset, usb->data, sizeof(__le32));
	trace_usb_reg_wr(dev, addr, val);
}

void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

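/* read-modify-write of a register, serialized by usb_ctrl_mtx */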
static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
		     u32 mask, u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	val |= __mt76u_rr(dev, addr) & ~mask;
	__mt76u_wr(dev, addr, val);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);

	return val;
}

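/* write a buffer to consecutive registers, 32 bits at a time */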
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
		       const void *data, int len)
{
	struct mt76_usb *usb = &dev->usb;
	const u32 *val = data;
	int i, ret;

	mutex_lock(&usb->usb_ctrl_mtx);
	for (i = 0; i < (len / 4); i++) {
		put_unaligned_le32(val[i], usb->data);
		ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
					     USB_DIR_OUT | USB_TYPE_VENDOR,
					     0, offset + i * 4, usb->data,
					     sizeof(__le32));
		if (ret < 0)
			break;
	}
	mutex_unlock(&usb->usb_ctrl_mtx);
}

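/* write a 32-bit value as two 16-bit vendor requests carried in wValue */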
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
		     const u16 offset, const u32 val)
{
	mutex_lock(&dev->usb.usb_ctrl_mtx);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val & 0xffff, offset, NULL, 0);
	__mt76u_vendor_request(dev, req,
			       USB_DIR_OUT | USB_TYPE_VENDOR,
			       val >> 16, offset + 2, NULL, 0);
	mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

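/*
 * Walk the current altsetting and record the bulk IN/OUT endpoint numbers
 * and max packet sizes; fail if the expected endpoints are missing.
 */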
static int
mt76u_set_endpoints(struct usb_interface *intf,
		    struct mt76_usb *usb)
{
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	int i, in_ep = 0, out_ep = 0;

	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc) &&
		    in_ep < __MT_EP_IN_MAX) {
			usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
			usb->in_max_packet = usb_endpoint_maxp(ep_desc);
			in_ep++;
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   out_ep < __MT_EP_OUT_MAX) {
			usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
			usb->out_max_packet = usb_endpoint_maxp(ep_desc);
			out_ep++;
		}
	}

	if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
		return -EINVAL;
	return 0;
}

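/*
 * Fill the rx urb scatterlist with page fragments and update buf->len;
 * returns the number of mapped segments, or -ENOMEM if none could be
 * allocated.
 */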
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
		 int nsgs, int len, int sglen)
{
	struct urb *urb = buf->urb;
	int i;

	for (i = 0; i < nsgs; i++) {
		struct page *page;
		void *data;
		int offset;

		data = netdev_alloc_frag(len);
		if (!data)
			break;

		page = virt_to_head_page(data);
		offset = data - page_address(page);
		sg_set_page(&urb->sg[i], page, sglen, offset);
	}

	if (i < nsgs) {
		int j;

		for (j = nsgs; j < urb->num_sgs; j++)
			skb_free_frag(sg_virt(&urb->sg[j]));
		urb->num_sgs = i;
	}

	urb->num_sgs = max_t(int, i, urb->num_sgs);
	buf->len = urb->num_sgs * sglen;
	sg_init_marker(urb->sg, urb->num_sgs);

	return i ? : -ENOMEM;
}

int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
		    int nsgs, int len, int sglen, gfp_t gfp)
{
	buf->urb = usb_alloc_urb(0, gfp);
	if (!buf->urb)
		return -ENOMEM;

	buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
				    gfp);
	if (!buf->urb->sg)
		return -ENOMEM;

	sg_init_table(buf->urb->sg, nsgs);
	buf->dev = dev;

	return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
}
EXPORT_SYMBOL_GPL(mt76u_buf_alloc);

void mt76u_buf_free(struct mt76u_buf *buf)
{
	struct urb *urb = buf->urb;
	struct scatterlist *sg;
	int i;

	for (i = 0; i < urb->num_sgs; i++) {
		sg = &urb->sg[i];
		if (!sg)
			continue;

		skb_free_frag(sg_virt(sg));
	}
	usb_free_urb(buf->urb);
}
EXPORT_SYMBOL_GPL(mt76u_buf_free);

int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
		     struct mt76u_buf *buf, gfp_t gfp,
		     usb_complete_t complete_fn, void *context)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned int pipe;

	if (dir == USB_DIR_IN)
		pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
	else
		pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
			  complete_fn, context);

	return usb_submit_urb(buf->urb, gfp);
}
EXPORT_SYMBOL_GPL(mt76u_submit_buf);

static inline struct mt76u_buf
*mt76u_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76u_buf *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->queued > 0) {
		buf = &q->entry[q->head].ubuf;
		q->head = (q->head + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	return buf;
}

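/* validate the usb dma header and return the dma payload length */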
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
	u16 dma_len, min_len;

	dma_len = get_unaligned_le16(data);
	min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
		  MT_FCE_INFO_LEN;

	if (data_len < min_len || WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return -EINVAL;
	return dma_len;
}

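/*
 * Build an skb out of the urb scatterlist and pass it to the driver rx
 * handler; returns the number of sg entries consumed.
 */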
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	u8 *data = sg_virt(&urb->sg[0]);
	int data_len, len, nsgs = 1;
	struct sk_buff *skb;

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
		return 0;

	len = mt76u_get_rx_entry_len(data, urb->actual_length);
	if (len < 0)
		return 0;

	skb = build_skb(data, q->buf_size);
	if (!skb)
		return 0;

	data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
	skb_reserve(skb, MT_DMA_HDR_LEN);
	if (skb->tail + data_len > skb->end) {
		dev_kfree_skb(skb);
		return 1;
	}

	__skb_put(skb, data_len);
	len -= data_len;

	while (len > 0) {
		data_len = min_t(int, len, urb->sg[nsgs].length);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				sg_page(&urb->sg[nsgs]),
				urb->sg[nsgs].offset,
				data_len, q->buf_size);
		len -= data_len;
		nsgs++;
	}
	dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

	return nsgs;
}

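/* rx urb completion: queue the buffer for the rx tasklet */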
static void mt76u_complete_rx(struct urb *urb)
{
	struct mt76_dev *dev = urb->context;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
		/* fall through */
	case 0:
		break;
	}

	spin_lock_irqsave(&q->lock, flags);
	if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
		goto out;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued++;
	tasklet_schedule(&dev->usb.rx_tasklet);
out:
	spin_unlock_irqrestore(&q->lock, flags);
}

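/*
 * rx bottom half: process completed buffers, refill their scatterlists
 * and resubmit the urbs to the rx bulk endpoint.
 */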
static void mt76u_rx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int err, nsgs, buf_len = q->buf_size;
	struct mt76u_buf *buf;

	rcu_read_lock();

	while (true) {
		buf = mt76u_get_next_rx_entry(q);
		if (!buf)
			break;

		nsgs = mt76u_process_rx_entry(dev, buf->urb);
		if (nsgs > 0) {
			err = mt76u_fill_rx_sg(dev, buf, nsgs,
					       buf_len,
					       SKB_WITH_OVERHEAD(buf_len));
			if (err < 0)
				break;
		}
		mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				 buf, GFP_ATOMIC,
				 mt76u_complete_rx, dev);
	}
	mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	rcu_read_unlock();
}

int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	unsigned long flags;
	int i, err = 0;

	spin_lock_irqsave(&q->lock, flags);
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
				       &q->entry[i].ubuf, GFP_ATOMIC,
				       mt76u_complete_rx, dev);
		if (err < 0)
			break;
	}
	q->head = q->tail = 0;
	q->queued = 0;
	spin_unlock_irqrestore(&q->lock, flags);

	return err;
}
EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i, err, nsgs;

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	if (mt76u_check_sg(dev)) {
		q->buf_size = MT_RX_BUF_SIZE;
		nsgs = MT_SG_MAX_SIZE;
	} else {
		q->buf_size = PAGE_SIZE;
		nsgs = 1;
	}

	q->ndesc = MT_NUM_RX_ENTRIES;
	for (i = 0; i < q->ndesc; i++) {
		err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
				      nsgs, q->buf_size,
				      SKB_WITH_OVERHEAD(q->buf_size),
				      GFP_KERNEL);
		if (err < 0)
			return err;
	}

	return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		mt76u_buf_free(&q->entry[i].ubuf);
}

static void mt76u_stop_rx(struct mt76_dev *dev)
{
	struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
	int i;

	for (i = 0; i < q->ndesc; i++)
		usb_kill_urb(q->entry[i].ubuf.urb);
}

int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
	struct sk_buff *iter, *last = skb;
	u32 info, pad;

	/* Buffer layout:
	 *	|   4B   | xfer len |      pad       |  4B  |
	 *	| TXINFO | pkt/cmd  | zero pad to 4B | zero |
	 *
	 * length field of TXINFO should be set to 'xfer len'.
	 */
	info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
	       FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
	put_unaligned_le32(info, skb_push(skb, sizeof(info)));

	pad = round_up(skb->len, 4) + 4 - skb->len;
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (unlikely(pad)) {
		if (__skb_pad(last, pad, true))
			return -ENOMEM;
		__skb_put(last, pad);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);

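/*
 * tx bottom half: reap completed entries on each AC queue, kick the tx
 * scheduler, schedule the status work and wake mac80211 queues if needed.
 */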
static void mt76u_tx_tasklet(unsigned long data)
{
	struct mt76_dev *dev = (struct mt76_dev *)data;
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	bool wake;
	int i;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		while (true) {
			buf = &q->entry[q->head].ubuf;
			if (!buf->done || !q->queued)
				break;

			dev->drv->tx_complete_skb(dev, q,
						  &q->entry[q->head],
						  false);

			if (q->entry[q->head].schedule) {
				q->entry[q->head].schedule = false;
				q->swq_queued--;
			}

			q->head = (q->head + 1) % q->ndesc;
			q->queued--;
		}
		mt76_txq_schedule(dev, q);
		wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
		if (!q->queued)
			wake_up(&dev->tx_wait);

		spin_unlock_bh(&q->lock);

		if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
			ieee80211_queue_delayed_work(dev->hw,
						     &dev->usb.stat_work,
						     msecs_to_jiffies(10));

		if (wake)
			ieee80211_wake_queue(dev->hw, i);
	}
}

static void mt76u_tx_status_data(struct work_struct *work)
{
	struct mt76_usb *usb;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	usb = container_of(work, struct mt76_usb, stat_work.work);
	dev = container_of(usb, struct mt76_dev, usb);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
		ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
					     msecs_to_jiffies(10));
	else
		clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
	struct mt76u_buf *buf = urb->context;
	struct mt76_dev *dev = buf->dev;

	if (mt76u_urb_error(urb))
		dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
	buf->done = true;

	tasklet_schedule(&dev->usb.tx_tasklet);
}

static int
mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
{
	int nsgs = 1 + skb_shinfo(skb)->nr_frags;
	struct sk_buff *iter;

	skb_walk_frags(skb, iter)
		nsgs += 1 + skb_shinfo(iter)->nr_frags;

	memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);

	nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
	sg_init_marker(urb->sg, nsgs);
	urb->num_sgs = nsgs;

	return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
}

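/*
 * Map an skb onto the tx urb scatterlist of the next free queue entry;
 * the urb is submitted later by mt76u_tx_kick().
 */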
static int
mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct usb_interface *intf = to_usb_interface(dev->dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	u8 ep = q2ep(q->hw_idx);
	struct mt76u_buf *buf;
	u16 idx = q->tail;
	unsigned int pipe;
	int err;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
	if (err < 0)
		return err;

	buf = &q->entry[idx].ubuf;
	buf->done = false;

	err = mt76u_tx_build_sg(skb, buf->urb);
	if (err < 0)
		return err;

	pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
	usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
			  mt76u_complete_tx, buf);

	q->tail = (q->tail + 1) % q->ndesc;
	q->entry[idx].skb = skb;
	q->queued++;

	return idx;
}

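/* submit all pending tx urbs between q->first and q->tail */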
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76u_buf *buf;
	int err;

	while (q->first != q->tail) {
		buf = &q->entry[q->first].ubuf;
		err = usb_submit_urb(buf->urb, GFP_ATOMIC);
		if (err < 0) {
			if (err == -ENODEV)
				set_bit(MT76_REMOVED, &dev->state);
			else
				dev_err(dev->dev, "tx urb submit failed:%d\n",
					err);
			break;
		}
		q->first = (q->first + 1) % q->ndesc;
	}
}

static int mt76u_alloc_tx(struct mt76_dev *dev)
{
	struct mt76u_buf *buf;
	struct mt76_queue *q;
	size_t size;
	int i, j;

	size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->swq);
		q->hw_idx = q2hwq(i);

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
		for (j = 0; j < q->ndesc; j++) {
			buf = &q->entry[j].ubuf;
			buf->dev = dev;

			buf->urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!buf->urb)
				return -ENOMEM;

			buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
			if (!buf->urb->sg)
				return -ENOMEM;
		}
	}
	return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_free_urb(q->entry[j].ubuf.urb);
	}
}

static void mt76u_stop_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i, j;

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		q = &dev->q_tx[i];
		for (j = 0; j < q->ndesc; j++)
			usb_kill_urb(q->entry[j].ubuf.urb);
	}
}

void mt76u_stop_queues(struct mt76_dev *dev)
{
	tasklet_disable(&dev->usb.rx_tasklet);
	tasklet_disable(&dev->usb.tx_tasklet);

	mt76u_stop_rx(dev);
	mt76u_stop_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_stop_queues);

void mt76u_stop_stat_wk(struct mt76_dev *dev)
{
	cancel_delayed_work_sync(&dev->usb.stat_work);
	clear_bit(MT76_READING_STATS, &dev->state);
}
EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
	mt76u_stop_queues(dev);

	mt76u_free_rx(dev);
	mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76u_alloc_rx(dev);
	if (err < 0)
		return err;

	return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
	.tx_queue_skb = mt76u_tx_queue_skb,
	.kick = mt76u_tx_kick,
};

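/* register usb bus/queue ops and set up tasklets, locks and endpoints */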
int mt76u_init(struct mt76_dev *dev,
	       struct usb_interface *intf)
{
	static const struct mt76_bus_ops mt76u_ops = {
		.rr = mt76u_rr,
		.wr = mt76u_wr,
		.rmw = mt76u_rmw,
		.copy = mt76u_copy,
	};
	struct mt76_usb *usb = &dev->usb;

	tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
	tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
	INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
	skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

	init_completion(&usb->mcu.cmpl);
	mutex_init(&usb->mcu.mutex);

	mutex_init(&usb->usb_ctrl_mtx);
	dev->bus = &mt76u_ops;
	dev->queue_ops = &usb_queue_ops;

	return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

MODULE_AUTHOR("Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");