/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
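	/* dma_pool_alloc() does not zero the returned memory. */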
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;

	is_out = usb_pipeout(urb->pipe);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;

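	/*
	 * Use the wireless endpoint companion descriptor's burst and
	 * sequence limits if the endpoint has one; otherwise fall
	 * back to the smallest workable values.
	 */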
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

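	/*
	 * info1 carries the static endpoint state: endpoint number
	 * and direction, transfer type, device info index and
	 * maximum packet length.
	 */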
	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE_53_3
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;
	qset->remove = 0;

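	/*
	 * The link field encodes the qset size (8 qTDs) and has the
	 * terminate (T) bit set until the qset is relinked into the
	 * ASL or PZL.
	 */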
	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
	qset->qh.err_count = 0;
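	/* Fully open the transfer window: one bit for each packet in
	 * a maximum-sized burst. */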
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
				 gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(qset, urb);
	}
	return qset;
}

void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
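		/*
		 * Illustrative example (WHCI_QSET_TD_MAX is 8): with
		 * td_end == 6 and ntds_remaining == 3, iAlt is
		 * (6 + 3) % 8 == 1, the ring slot just after this
		 * transfer's final qTD.
		 */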
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status)) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->num_pointers) {
		dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
				 std->num_pointers * sizeof(struct whc_page_list_entry),
				 DMA_TO_DEVICE);
		kfree(std->pl_virt);
	}

	kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}

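/**
 * qset_fill_page_list - build the page list for an sTD whose buffer
 * spans more than one WHCI page.
 */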
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t std_len = std->len;
	size_t pl_len;
	int p;

	/* Round down so the count includes the partial first page. */
	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std_len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
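	/*
	 * Example (assuming a 4 KiB WHCI_PAGE_SIZE): an 8000 byte
	 * buffer starting 256 bytes into a page ends at byte offset
	 * 8256 and so touches three pages: three page list entries
	 * are needed.
	 */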

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);
	if (dma_mapping_error(whc->wusbhc.dev, std->dma_addr)) {
		kfree(std->pl_virt);
		std->pl_virt = NULL;
		std->num_pointers = 0;
		return -EFAULT;
	}

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		/* Entries after the first must be page aligned. */
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

/**
 * urb_dequeue_work - executes the ASL/PZL update and gives the urb
 * back to the USB subsystem.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
	gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std = kmalloc(sizeof(struct whc_std), mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std->urb = urb;
		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;
		std->qtd = NULL;

		INIT_LIST_HEAD(&std->list_node);
		list_add_tail(&std->list_node, &qset->stds);

		if (std_len > WHCI_PAGE_SIZE) {
			if (qset_fill_page_list(whc, std, mem_flags) < 0)
				goto err_no_mem;
		} else
			std->num_pointers = 0;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	/* Also free the whc_urb so it isn't leaked. */
	kfree(wurb);
	urb->hcpriv = NULL;
	return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
			    struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the transferred bytes from the qTD; if the urb
 * is completely transferred or (in the case of an IN transfer only)
 * the last packet flag (LPF) is set, then the transfer is complete
 * and the urb should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
				 struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

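	/*
	 * The transfer is complete if this was the last sTD, or, for
	 * an IN, if the device sent a short (last) packet early.
	 */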
	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * If all transfers for this URB are complete, return it to
	 * the USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			       struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

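	/* Deactivate any remaining qTDs (they belong to other URBs)
	 * so the qset can be removed from the schedule. */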
	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}

void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}
528