/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"
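
/**
 * qset_alloc - allocate a qset from the host controller's DMA pool.
 * @whc:       the WHCI host controller
 * @mem_flags: allocation flags
 *
 * The qset is zeroed and its bus address recorded for later use by
 * the schedule lists and dma_pool_free().
 */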
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
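	/* dma_pool_alloc() does not zero the returned block. */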
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc:  the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;
	uint8_t phy_rate;

	is_out = usb_pipeout(urb->pipe);

	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	/*
	 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
	 * the maximum supported by the device for other endpoints
	 * (unless limited by the user).
	 */
	if (usb_pipecontrol(urb->pipe))
		phy_rate = UWB_PHY_RATE_53;
	else {
		uint16_t phy_rates;

		phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
		phy_rate = fls(phy_rates) - 1;
		if (phy_rate > whc->wusbhc.phy_rate)
			phy_rate = whc->wusbhc.phy_rate;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE(phy_rate)
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
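
	/* Start with all max_burst bits set in the current window. */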
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reset = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
				 gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(whc, qset, urb);
	}
	return qset;
}
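
/**
 * qset_remove_complete - finish removing a qset from its schedule.
 *
 * Called once the hardware no longer references the qset; wakes any
 * thread waiting in qset_delete().
 */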
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns WHC_UPDATE_UPDATED if the list (ASL/PZL) must be updated
 * because (for a WHCI 0.95 controller) an activated qTD was pointed
 * to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;
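
	/*
	 * Give each sTD that doesn't yet have one a slot in the
	 * qset's circular qTD array, stopping when the array is full
	 * or a pause point is reached.
	 */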
	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
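	/* Zeroing the status word clears QTD_STS_ACTIVE, deactivating the qTD. */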
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}
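
/*
 * Copy the data from an IN transfer's bounce buffer back into the
 * urb's scatterlist.
 */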
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;

		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);
		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (std->dma_addr)
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}
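
/*
 * Build and map the page list for an sTD whose buffer spans more
 * than one WHCI page.
 */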
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t pl_len;
	int p;

	/* Short buffers don't need a page list. */
	if (std->len <= WHCI_PAGE_SIZE) {
		std->num_pointers = 0;
		return 0;
	}

	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std->len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
		kfree(std->pl_virt);
		std->pl_virt = NULL; /* avoid a double free in qset_free_std() */
		return -EFAULT;
	}

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

/**
 * urb_dequeue_work - executes the ASL/PZL update and gives the urb
 * back to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}
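
/*
 * Allocate a new sTD and add it to the tail of the qset's sTD list;
 * the caller fills in the transfer details.
 */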
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}
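
/*
 * Map an urb's scatterlist directly onto sTDs and page lists.
 * Returns -EINVAL if the sg element boundaries don't allow this, in
 * which case the caller falls back to bounce buffers.
 */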
static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	struct whc_page_list_entry *new_pl_virt;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0)
			break;

		dma_addr = sg_dma_address(sg);
		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

		while (dma_remaining) {
			size_t dma_len;

			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * was not a whole number of packets then this
			 * sg list cannot be mapped onto multiple
			 * qTDs.  Return an error and let the caller
			 * sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (std && std->len % qset->max_packet != 0)
					return -EINVAL;
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				ntds++;
				p = 0;
			}

			dma_len = dma_remaining;

			/*
			 * If the remainder of this element doesn't
			 * fit in a single qTD, limit the qTD to a
			 * whole number of packets.  This allows the
			 * remainder to go into the next qTD.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
					* qset->max_packet - std->len;
			}

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;

			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

			new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (new_pl_virt == NULL) {
				kfree(std->pl_virt);
				std->pl_virt = NULL;
				return -ENOMEM;
			}
			std->pl_virt = new_pl_virt;

			for (; p < std->num_pointers; p++) {
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}

	/*
	 * Now that the number of sTDs is known, go back and fill in
	 * std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->ntds_remaining = ntds--;
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
		}
	}
	return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0)
			break;

		sg_remaining = min_t(size_t, remaining, sg->length);
		orig = sg_virt(sg);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;
				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}

			len = min(sg_remaining, max_std_len - std->len);

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}

	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
	gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	if (urb->num_sgs) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_no_mem;
		return 0;
	}

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;
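
	/* Chop a contiguous buffer into sTDs of at most QTD_MAX_XFER_SIZE bytes. */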
	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_no_mem;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
			    struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
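	/*
	 * For a halted qTD, map the error bits onto the usual urb
	 * status codes: a data buffer error becomes -ENOSR (IN) or
	 * -ECOMM (OUT), babble becomes -EOVERFLOW, exceeding the
	 * retry count becomes -ETIME, and anything else is reported
	 * as a stall (-EPIPE).
	 */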
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb is completely transferred, or (for an IN transfer) the last
 * packet flag (LPF) is set, then the transfer is complete and the urb
 * should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
				 struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			       struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}
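
/*
 * Return a qset to the DMA pool.  Use qset_delete() if the hardware
 * may still be using the qset.
 */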
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}
840