1 /*
2  * MUSB OTG driver host support
3  *
4  * Copyright 2005 Mentor Graphics Corporation
5  * Copyright (C) 2005-2006 by Texas Instruments
6  * Copyright (C) 2006-2007 Nokia Corporation
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
25  * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  *
33  */
34 
35 #include <linux/module.h>
36 #include <linux/kernel.h>
37 #include <linux/delay.h>
38 #include <linux/sched.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/init.h>
42 #include <linux/list.h>
43 
44 #include "musb_core.h"
45 #include "musb_host.h"
46 
47 
48 /* MUSB HOST status 22-mar-2006
49  *
50  * - There's still lots of partial code duplication for fault paths, so
51  *   they aren't handled as consistently as they need to be.
52  *
53  * - PIO mostly behaved when last tested.
54  *     + including ep0, with all usbtest cases 9, 10
55  *     + usbtest 14 (ep0out) doesn't seem to run at all
56  *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
57  *       configurations, but otherwise double buffering passes basic tests.
58  *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
59  *
60  * - DMA (CPPI) ... partially behaves, not currently recommended
61  *     + about 1/15 the speed of typical EHCI implementations (PCI)
62  *     + RX, all too often reqpkt seems to misbehave after tx
63  *     + TX, no known issues (other than evident silicon issue)
64  *
65  * - DMA (Mentor/OMAP) ...has at least toggle update problems
66  *
67  * - Still no traffic scheduling code to make NAKing for bulk or control
68  *   transfers unable to starve other requests; or to make efficient use
69  *   of hardware with periodic transfers.  (Note that network drivers
70  *   commonly post bulk reads that stay pending for a long time; these
71  *   would make very visible trouble.)
72  *
73  * - Not tested with HNP, but some SRP paths seem to behave.
74  *
75  * NOTE 24-August-2006:
76  *
77  * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
78  *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
79  *   mostly works, except that with "usbnet" it's easy to trigger cases
80  *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
81  *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
82  *   although ARP RX wins.  (That test was done with a full speed link.)
83  */
84 
85 
86 /*
87  * NOTE on endpoint usage:
88  *
89  * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
90  * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
91  *
92  * (Yes, bulk _could_ use more of the endpoints than that, and would even
93  * benefit from it ... one remote device may easily be NAKing while others
94  * need to perform transfers in that same direction.  The same thing could
95  * be done in software though, assuming dma cooperates.)
96  *
97  * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
98  * So far that scheduling is both dumb and optimistic:  the endpoint will be
99  * "claimed" until its software queue is no longer refilled.  No multiplexing
100  * of transfers between endpoints, or anything clever.
101  */
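/* Concretely (a sketch of that policy): each active interrupt or iso
 * pipe -- say a hub status pipe plus keyboard and mouse -- claims its
 * own hardware endpoint until its URB queue drains, while control and
 * bulk traffic share ep0 and the dedicated bulk endpoint pair.
 */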
102 
103 
104 static void musb_ep_program(struct musb *musb, u8 epnum,
105 			struct urb *urb, unsigned int is_out,
106 			u8 *buf, u32 len);
107 
108 /*
109  * Clear TX fifo. Needed to avoid BABBLE errors.
110  */
111 static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
112 {
113 	void __iomem	*epio = ep->regs;
114 	u16		csr;
115 	u16		lastcsr = 0;
116 	int		retries = 1000;
117 
118 	csr = musb_readw(epio, MUSB_TXCSR);
119 	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
120 		if (csr != lastcsr)
121 			DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
122 		lastcsr = csr;
123 		csr |= MUSB_TXCSR_FLUSHFIFO;
124 		musb_writew(epio, MUSB_TXCSR, csr);
125 		csr = musb_readw(epio, MUSB_TXCSR);
126 		if (WARN(retries-- < 1,
127 				"Could not flush host TX%d fifo: csr: %04x\n",
128 				ep->epnum, csr))
129 			return;
130 		mdelay(1);
131 	}
132 }
133 
134 /*
135  * Start transmit. Caller is responsible for locking shared resources.
136  * musb must be locked.
137  */
138 static inline void musb_h_tx_start(struct musb_hw_ep *ep)
139 {
140 	u16	txcsr;
141 
142 	/* NOTE: no locks here; caller should lock and select EP */
143 	if (ep->epnum) {
144 		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
145 		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
146 		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
147 	} else {
148 		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
149 		musb_writew(ep->regs, MUSB_CSR0, txcsr);
150 	}
151 
152 }
153 
154 static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
155 {
156 	u16	txcsr;
157 
158 	/* NOTE: no locks here; caller should lock and select EP */
159 	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
160 	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
161 	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
162 }
163 
164 /*
165  * Start the URB at the front of an endpoint's queue
166  * end must be claimed from the caller.
167  *
168  * Context: controller locked, irqs blocked
169  */
170 static void
171 musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
172 {
173 	u16			frame;
174 	u32			len;
175 	void			*buf;
176 	void __iomem		*mbase =  musb->mregs;
177 	struct urb		*urb = next_urb(qh);
178 	struct musb_hw_ep	*hw_ep = qh->hw_ep;
179 	unsigned		pipe = urb->pipe;
180 	u8			address = usb_pipedevice(pipe);
181 	int			epnum = hw_ep->epnum;
182 
183 	/* initialize software qh state */
184 	qh->offset = 0;
185 	qh->segsize = 0;
186 
187 	/* gather right source of data */
188 	switch (qh->type) {
189 	case USB_ENDPOINT_XFER_CONTROL:
190 		/* control transfers always start with SETUP */
191 		is_in = 0;
192 		hw_ep->out_qh = qh;
193 		musb->ep0_stage = MUSB_EP0_START;
194 		buf = urb->setup_packet;
195 		len = 8;
196 		break;
197 	case USB_ENDPOINT_XFER_ISOC:
198 		qh->iso_idx = 0;
199 		qh->frame = 0;
200 		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
201 		len = urb->iso_frame_desc[0].length;
202 		break;
203 	default:		/* bulk, interrupt */
204 		buf = urb->transfer_buffer;
205 		len = urb->transfer_buffer_length;
206 	}
207 
208 	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
209 			qh, urb, address, qh->epnum,
210 			is_in ? "in" : "out",
211 			({char *s; switch (qh->type) {
212 			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
213 			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
214 			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
215 			default:			s = "-intr"; break;
216 			}; s; }),
217 			epnum, buf, len);
218 
219 	/* Configure endpoint */
220 	if (is_in || hw_ep->is_shared_fifo)
221 		hw_ep->in_qh = qh;
222 	else
223 		hw_ep->out_qh = qh;
224 	musb_ep_program(musb, epnum, urb, !is_in, buf, len);
225 
226 	/* transmit may have more work: start it when it is time */
227 	if (is_in)
228 		return;
229 
230 	/* determine if the time is right for a periodic transfer */
231 	switch (qh->type) {
232 	case USB_ENDPOINT_XFER_ISOC:
233 	case USB_ENDPOINT_XFER_INT:
234 		DBG(3, "check whether there's still time for periodic Tx\n");
235 		qh->iso_idx = 0;
236 		frame = musb_readw(mbase, MUSB_FRAME);
237 		/* FIXME this doesn't implement that scheduling policy ...
238 		 * or handle framecounter wrapping
239 		 */
240 		if ((urb->transfer_flags & URB_ISO_ASAP)
241 				|| (frame >= urb->start_frame)) {
242 			/* REVISIT the SOF irq handler shouldn't duplicate
243 			 * this code; and we don't init urb->start_frame...
244 			 */
245 			qh->frame = 0;
246 			goto start;
247 		} else {
248 			qh->frame = urb->start_frame;
249 			/* enable SOF interrupt so we can count down */
250 			DBG(1, "SOF for %d\n", epnum);
251 #if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
252 			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
253 #endif
254 		}
255 		break;
256 	default:
257 start:
258 		DBG(4, "Start TX%d %s\n", epnum,
259 			hw_ep->tx_channel ? "dma" : "pio");
260 
261 		if (!hw_ep->tx_channel)
262 			musb_h_tx_start(hw_ep);
263 		else if (is_cppi_enabled() || tusb_dma_omap())
264 			cppi_host_txdma_start(hw_ep);
265 	}
266 }
267 
268 /* caller owns controller lock, irqs are blocked */
269 static void
270 __musb_giveback(struct musb *musb, struct urb *urb, int status)
271 __releases(musb->lock)
272 __acquires(musb->lock)
273 {
274 	DBG(({ int level; switch (status) {
275 				case 0:
276 					level = 4;
277 					break;
278 				/* common/boring faults */
279 				case -EREMOTEIO:
280 				case -ESHUTDOWN:
281 				case -ECONNRESET:
282 				case -EPIPE:
283 					level = 3;
284 					break;
285 				default:
286 					level = 2;
287 					break;
288 				}; level; }),
289 			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
290 			urb, urb->complete, status,
291 			usb_pipedevice(urb->pipe),
292 			usb_pipeendpoint(urb->pipe),
293 			usb_pipein(urb->pipe) ? "in" : "out",
294 			urb->actual_length, urb->transfer_buffer_length
295 			);
296 
297 	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
298 	spin_unlock(&musb->lock);
299 	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
300 	spin_lock(&musb->lock);
301 }
302 
303 /* for bulk/interrupt endpoints only */
304 static inline void
305 musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
306 {
307 	struct usb_device	*udev = urb->dev;
308 	u16			csr;
309 	void __iomem		*epio = ep->regs;
310 	struct musb_qh		*qh;
311 
312 	/* FIXME:  the current Mentor DMA code seems to have
313 	 * problems getting toggle correct.
314 	 */
315 
316 	if (is_in || ep->is_shared_fifo)
317 		qh = ep->in_qh;
318 	else
319 		qh = ep->out_qh;
320 
321 	if (!is_in) {
322 		csr = musb_readw(epio, MUSB_TXCSR);
323 		usb_settoggle(udev, qh->epnum, 1,
324 			(csr & MUSB_TXCSR_H_DATATOGGLE)
325 				? 1 : 0);
326 	} else {
327 		csr = musb_readw(epio, MUSB_RXCSR);
328 		usb_settoggle(udev, qh->epnum, 0,
329 			(csr & MUSB_RXCSR_H_DATATOGGLE)
330 				? 1 : 0);
331 	}
332 }
333 
334 /* caller owns controller lock, irqs are blocked */
335 static struct musb_qh *
336 musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
337 {
338 	struct musb_hw_ep	*ep = qh->hw_ep;
339 	struct musb		*musb = ep->musb;
340 	int			is_in = usb_pipein(urb->pipe);
341 	int			ready = qh->is_ready;
342 
343 	/* save toggle eagerly, for paranoia */
344 	switch (qh->type) {
345 	case USB_ENDPOINT_XFER_BULK:
346 	case USB_ENDPOINT_XFER_INT:
347 		musb_save_toggle(ep, is_in, urb);
348 		break;
349 	case USB_ENDPOINT_XFER_ISOC:
350 		if (status == 0 && urb->error_count)
351 			status = -EXDEV;
352 		break;
353 	}
354 
355 	qh->is_ready = 0;
356 	__musb_giveback(musb, urb, status);
357 	qh->is_ready = ready;
358 
359 	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
360 	 * invalidate qh as soon as list_empty(&hep->urb_list)
361 	 */
362 	if (list_empty(&qh->hep->urb_list)) {
363 		struct list_head	*head;
364 
365 		if (is_in)
366 			ep->rx_reinit = 1;
367 		else
368 			ep->tx_reinit = 1;
369 
370 		/* clobber old pointers to this qh */
371 		if (is_in || ep->is_shared_fifo)
372 			ep->in_qh = NULL;
373 		else
374 			ep->out_qh = NULL;
375 		qh->hep->hcpriv = NULL;
376 
377 		switch (qh->type) {
378 
379 		case USB_ENDPOINT_XFER_CONTROL:
380 		case USB_ENDPOINT_XFER_BULK:
381 			/* fifo policy for these lists, except that NAKing
382 			 * should rotate a qh to the end (for fairness).
383 			 */
384 			if (qh->mux == 1) {
385 				head = qh->ring.prev;
386 				list_del(&qh->ring);
387 				kfree(qh);
388 				qh = first_qh(head);
389 				break;
390 			}
391 
392 		case USB_ENDPOINT_XFER_ISOC:
393 		case USB_ENDPOINT_XFER_INT:
394 			/* this is where periodic bandwidth should be
395 			 * de-allocated if it's tracked and allocated;
396 			 * and where we'd update the schedule tree...
397 			 */
398 			musb->periodic[ep->epnum] = NULL;
399 			kfree(qh);
400 			qh = NULL;
401 			break;
402 		}
403 	}
404 	return qh;
405 }
406 
407 /*
408  * Advance this hardware endpoint's queue, completing the specified urb and
409  * advancing to either the next urb queued to that qh, or else invalidating
410  * that qh and advancing to the next qh scheduled after the current one.
411  *
412  * Context: caller owns controller lock, irqs are blocked
413  */
414 static void
415 musb_advance_schedule(struct musb *musb, struct urb *urb,
416 		struct musb_hw_ep *hw_ep, int is_in)
417 {
418 	struct musb_qh	*qh;
419 
420 	if (is_in || hw_ep->is_shared_fifo)
421 		qh = hw_ep->in_qh;
422 	else
423 		qh = hw_ep->out_qh;
424 
425 	if (urb->status == -EINPROGRESS)
426 		qh = musb_giveback(qh, urb, 0);
427 	else
428 		qh = musb_giveback(qh, urb, urb->status);
429 
430 	if (qh != NULL && qh->is_ready) {
431 		DBG(4, "... next ep%d %cX urb %p\n",
432 				hw_ep->epnum, is_in ? 'R' : 'T',
433 				next_urb(qh));
434 		musb_start_urb(musb, is_in, qh);
435 	}
436 }
437 
438 static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
439 {
440 	/* we don't want fifo to fill itself again;
441 	 * ignore dma (various models),
442 	 * leave toggle alone (may not have been saved yet)
443 	 */
444 	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
445 	csr &= ~(MUSB_RXCSR_H_REQPKT
446 		| MUSB_RXCSR_H_AUTOREQ
447 		| MUSB_RXCSR_AUTOCLEAR);
448 
449 	/* write 2x to allow double buffering */
450 	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
451 	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
452 
453 	/* flush writebuffer */
454 	return musb_readw(hw_ep->regs, MUSB_RXCSR);
455 }
456 
457 /*
458  * PIO RX for a packet (or part of it).
459  */
460 static bool
461 musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
462 {
463 	u16			rx_count;
464 	u8			*buf;
465 	u16			csr;
466 	bool			done = false;
467 	u32			length;
468 	int			do_flush = 0;
469 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
470 	void __iomem		*epio = hw_ep->regs;
471 	struct musb_qh		*qh = hw_ep->in_qh;
472 	int			pipe = urb->pipe;
473 	void			*buffer = urb->transfer_buffer;
474 
475 	/* musb_ep_select(mbase, epnum); */
476 	rx_count = musb_readw(epio, MUSB_RXCOUNT);
477 	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
478 			urb->transfer_buffer, qh->offset,
479 			urb->transfer_buffer_length);
480 
481 	/* unload FIFO */
482 	if (usb_pipeisoc(pipe)) {
483 		int					status = 0;
484 		struct usb_iso_packet_descriptor	*d;
485 
486 		if (iso_err) {
487 			status = -EILSEQ;
488 			urb->error_count++;
489 		}
490 
491 		d = urb->iso_frame_desc + qh->iso_idx;
492 		buf = buffer + d->offset;
493 		length = d->length;
494 		if (rx_count > length) {
495 			if (status == 0) {
496 				status = -EOVERFLOW;
497 				urb->error_count++;
498 			}
499 			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
500 			do_flush = 1;
501 		} else
502 			length = rx_count;
503 		urb->actual_length += length;
504 		d->actual_length = length;
505 
506 		d->status = status;
507 
508 		/* see if we are done */
509 		done = (++qh->iso_idx >= urb->number_of_packets);
510 	} else {
511 		/* non-isoch */
512 		buf = buffer + qh->offset;
513 		length = urb->transfer_buffer_length - qh->offset;
514 		if (rx_count > length) {
515 			if (urb->status == -EINPROGRESS)
516 				urb->status = -EOVERFLOW;
517 			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
518 			do_flush = 1;
519 		} else
520 			length = rx_count;
521 		urb->actual_length += length;
522 		qh->offset += length;
523 
524 		/* see if we are done */
525 		done = (urb->actual_length == urb->transfer_buffer_length)
526 			|| (rx_count < qh->maxpacket)
527 			|| (urb->status != -EINPROGRESS);
528 		if (done
529 				&& (urb->status == -EINPROGRESS)
530 				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
531 				&& (urb->actual_length
532 					< urb->transfer_buffer_length))
533 			urb->status = -EREMOTEIO;
534 	}
535 
536 	musb_read_fifo(hw_ep, length, buf);
537 
538 	csr = musb_readw(epio, MUSB_RXCSR);
539 	csr |= MUSB_RXCSR_H_WZC_BITS;
540 	if (unlikely(do_flush))
541 		musb_h_flush_rxfifo(hw_ep, csr);
542 	else {
543 		/* REVISIT this assumes AUTOCLEAR is never set */
544 		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
545 		if (!done)
546 			csr |= MUSB_RXCSR_H_REQPKT;
547 		musb_writew(epio, MUSB_RXCSR, csr);
548 	}
549 
550 	return done;
551 }
552 
553 /* we don't always need to reinit a given side of an endpoint...
554  * when we do, use tx/rx reinit routine and then construct a new CSR
555  * to address data toggle, NYET, and DMA or PIO.
556  *
557  * it's possible that driver bugs (especially for DMA) or aborting a
558  * transfer might have left the endpoint busier than it should be.
559  * the busy/not-empty tests are basically paranoia.
560  */
561 static void
562 musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
563 {
564 	u16	csr;
565 
566 	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
567 	 * That always uses tx_reinit since ep0 repurposes TX register
568 	 * offsets; the initial SETUP packet is also a kind of OUT.
569 	 */
570 
571 	/* if programmed for Tx, put it in RX mode */
572 	if (ep->is_shared_fifo) {
573 		csr = musb_readw(ep->regs, MUSB_TXCSR);
574 		if (csr & MUSB_TXCSR_MODE) {
575 			musb_h_tx_flush_fifo(ep);
576 			musb_writew(ep->regs, MUSB_TXCSR,
577 					MUSB_TXCSR_FRCDATATOG);
578 		}
579 		/* clear mode (and everything else) to enable Rx */
580 		musb_writew(ep->regs, MUSB_TXCSR, 0);
581 
582 	/* scrub all previous state, clearing toggle */
583 	} else {
584 		csr = musb_readw(ep->regs, MUSB_RXCSR);
585 		if (csr & MUSB_RXCSR_RXPKTRDY)
586 			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
587 				musb_readw(ep->regs, MUSB_RXCOUNT));
588 
589 		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
590 	}
591 
592 	/* target addr and (for multipoint) hub addr/port */
593 	if (musb->is_multipoint) {
594 		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
595 		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
596 		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
597 
598 	} else
599 		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);
600 
601 	/* protocol/endpoint, interval/NAKlimit, i/o size */
602 	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
603 	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
604 	/* NOTE: bulk combining rewrites high bits of maxpacket */
605 	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);
606 
607 	ep->rx_reinit = 0;
608 }
609 
610 
611 /*
612  * Program an HDRC endpoint as per the given URB
613  * Context: irqs blocked, controller lock held
614  */
615 static void musb_ep_program(struct musb *musb, u8 epnum,
616 			struct urb *urb, unsigned int is_out,
617 			u8 *buf, u32 len)
618 {
619 	struct dma_controller	*dma_controller;
620 	struct dma_channel	*dma_channel;
621 	u8			dma_ok;
622 	void __iomem		*mbase = musb->mregs;
623 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
624 	void __iomem		*epio = hw_ep->regs;
625 	struct musb_qh		*qh;
626 	u16			packet_sz;
627 
628 	if (!is_out || hw_ep->is_shared_fifo)
629 		qh = hw_ep->in_qh;
630 	else
631 		qh = hw_ep->out_qh;
632 
633 	packet_sz = qh->maxpacket;
634 
635 	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
636 				"h_addr%02x h_port%02x bytes %d\n",
637 			is_out ? "-->" : "<--",
638 			epnum, urb, urb->dev->speed,
639 			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
640 			qh->h_addr_reg, qh->h_port_reg,
641 			len);
642 
643 	musb_ep_select(mbase, epnum);
644 
645 	/* candidate for DMA? */
646 	dma_controller = musb->dma_controller;
647 	if (is_dma_capable() && epnum && dma_controller) {
648 		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
649 		if (!dma_channel) {
650 			dma_channel = dma_controller->channel_alloc(
651 					dma_controller, hw_ep, is_out);
652 			if (is_out)
653 				hw_ep->tx_channel = dma_channel;
654 			else
655 				hw_ep->rx_channel = dma_channel;
656 		}
657 	} else
658 		dma_channel = NULL;
659 
660 	/* make sure we clear DMAEnab, autoSet bits from previous run */
661 
662 	/* OUT/transmit/EP0 or IN/receive? */
663 	if (is_out) {
664 		u16	csr;
665 		u16	int_txe;
666 		u16	load_count;
667 
668 		csr = musb_readw(epio, MUSB_TXCSR);
669 
670 		/* disable interrupt in case we flush */
671 		int_txe = musb_readw(mbase, MUSB_INTRTXE);
672 		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
673 
674 		/* general endpoint setup */
675 		if (epnum) {
676 			/* ASSERT:  TXCSR_DMAENAB was already cleared */
677 
678 			/* flush all old state, set default */
679 			musb_h_tx_flush_fifo(hw_ep);
680 			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
681 					| MUSB_TXCSR_DMAMODE
682 					| MUSB_TXCSR_FRCDATATOG
683 					| MUSB_TXCSR_H_RXSTALL
684 					| MUSB_TXCSR_H_ERROR
685 					| MUSB_TXCSR_TXPKTRDY
686 					);
687 			csr |= MUSB_TXCSR_MODE;
688 
689 			if (usb_gettoggle(urb->dev,
690 					qh->epnum, 1))
691 				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
692 					| MUSB_TXCSR_H_DATATOGGLE;
693 			else
694 				csr |= MUSB_TXCSR_CLRDATATOG;
695 
696 			/* twice in case of double packet buffering */
697 			musb_writew(epio, MUSB_TXCSR, csr);
698 			/* REVISIT may need to clear FLUSHFIFO ... */
699 			musb_writew(epio, MUSB_TXCSR, csr);
700 			csr = musb_readw(epio, MUSB_TXCSR);
701 		} else {
702 			/* endpoint 0: just flush */
703 			musb_writew(epio, MUSB_CSR0,
704 				csr | MUSB_CSR0_FLUSHFIFO);
705 			musb_writew(epio, MUSB_CSR0,
706 				csr | MUSB_CSR0_FLUSHFIFO);
707 		}
708 
709 		/* target addr and (for multipoint) hub addr/port */
710 		if (musb->is_multipoint) {
711 			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
712 			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
713 			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
714 /* FIXME if !epnum, do the same for RX ... */
715 		} else
716 			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
717 
718 		/* protocol/endpoint/interval/NAKlimit */
719 		if (epnum) {
720 			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
721 			if (can_bulk_split(musb, qh->type))
722 				musb_writew(epio, MUSB_TXMAXP,
723 					packet_sz
724 					| ((hw_ep->max_packet_sz_tx /
725 						packet_sz) - 1) << 11);
726 			else
727 				musb_writew(epio, MUSB_TXMAXP,
728 					packet_sz);
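			/* Worked example (assuming a 1024 byte TX fifo and a
			 * 512 byte bulk maxpacket): the bulk-split case above
			 * writes 512 | ((1024/512) - 1) << 11, i.e. maxpacket
			 * in the low bits plus (packets per fifo - 1) in the
			 * high bits.
			 */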
729 			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
730 		} else {
731 			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
732 			if (musb->is_multipoint)
733 				musb_writeb(epio, MUSB_TYPE0,
734 						qh->type_reg);
735 		}
736 
737 		if (can_bulk_split(musb, qh->type))
738 			load_count = min((u32) hw_ep->max_packet_sz_tx,
739 						len);
740 		else
741 			load_count = min((u32) packet_sz, len);
742 
743 #ifdef CONFIG_USB_INVENTRA_DMA
744 		if (dma_channel) {
745 
746 			/* clear previous state */
747 			csr = musb_readw(epio, MUSB_TXCSR);
748 			csr &= ~(MUSB_TXCSR_AUTOSET
749 				| MUSB_TXCSR_DMAMODE
750 				| MUSB_TXCSR_DMAENAB);
751 			csr |= MUSB_TXCSR_MODE;
752 			musb_writew(epio, MUSB_TXCSR,
753 				csr | MUSB_TXCSR_MODE);
754 
755 			qh->segsize = min(len, dma_channel->max_len);
756 
757 			if (qh->segsize <= packet_sz)
758 				dma_channel->desired_mode = 0;
759 			else
760 				dma_channel->desired_mode = 1;
761 
762 
763 			if (dma_channel->desired_mode == 0) {
764 				csr &= ~(MUSB_TXCSR_AUTOSET
765 					| MUSB_TXCSR_DMAMODE);
766 				csr |= (MUSB_TXCSR_DMAENAB);
767 					/* against programming guide */
768 			} else
769 				csr |= (MUSB_TXCSR_AUTOSET
770 					| MUSB_TXCSR_DMAENAB
771 					| MUSB_TXCSR_DMAMODE);
772 
773 			musb_writew(epio, MUSB_TXCSR, csr);
774 
775 			dma_ok = dma_controller->channel_program(
776 					dma_channel, packet_sz,
777 					dma_channel->desired_mode,
778 					urb->transfer_dma,
779 					qh->segsize);
780 			if (dma_ok) {
781 				load_count = 0;
782 			} else {
783 				dma_controller->channel_release(dma_channel);
784 				if (is_out)
785 					hw_ep->tx_channel = NULL;
786 				else
787 					hw_ep->rx_channel = NULL;
788 				dma_channel = NULL;
789 			}
790 		}
791 #endif
792 
793 		/* candidate for DMA */
794 		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
795 
796 			/* program endpoint CSRs first, then setup DMA.
797 			 * assume CPPI setup succeeds.
798 			 * defer enabling dma.
799 			 */
800 			csr = musb_readw(epio, MUSB_TXCSR);
801 			csr &= ~(MUSB_TXCSR_AUTOSET
802 					| MUSB_TXCSR_DMAMODE
803 					| MUSB_TXCSR_DMAENAB);
804 			csr |= MUSB_TXCSR_MODE;
805 			musb_writew(epio, MUSB_TXCSR,
806 				csr | MUSB_TXCSR_MODE);
807 
808 			dma_channel->actual_len = 0L;
809 			qh->segsize = len;
810 
811 			/* TX uses "rndis" mode automatically, but needs help
812 			 * to identify the zero-length-final-packet case.
813 			 */
814 			dma_ok = dma_controller->channel_program(
815 					dma_channel, packet_sz,
816 					(urb->transfer_flags
817 							& URB_ZERO_PACKET)
818 						== URB_ZERO_PACKET,
819 					urb->transfer_dma,
820 					qh->segsize);
821 			if (dma_ok) {
822 				load_count = 0;
823 			} else {
824 				dma_controller->channel_release(dma_channel);
825 				hw_ep->tx_channel = NULL;
826 				dma_channel = NULL;
827 
828 				/* REVISIT there's an error path here that
829 				 * needs handling:  can't do dma, but
830 				 * there's no pio buffer address...
831 				 */
832 			}
833 		}
834 
835 		if (load_count) {
836 			/* ASSERT:  TXCSR_DMAENAB was already cleared */
837 
838 			/* PIO to load FIFO */
839 			qh->segsize = load_count;
840 			musb_write_fifo(hw_ep, load_count, buf);
841 			csr = musb_readw(epio, MUSB_TXCSR);
842 			csr &= ~(MUSB_TXCSR_DMAENAB
843 				| MUSB_TXCSR_DMAMODE
844 				| MUSB_TXCSR_AUTOSET);
845 			/* write CSR */
846 			csr |= MUSB_TXCSR_MODE;
847 
848 			if (epnum)
849 				musb_writew(epio, MUSB_TXCSR, csr);
850 		}
851 
852 		/* re-enable interrupt */
853 		musb_writew(mbase, MUSB_INTRTXE, int_txe);
854 
855 	/* IN/receive */
856 	} else {
857 		u16	csr;
858 
859 		if (hw_ep->rx_reinit) {
860 			musb_rx_reinit(musb, qh, hw_ep);
861 
862 			/* init new state: toggle and NYET, maybe DMA later */
863 			if (usb_gettoggle(urb->dev, qh->epnum, 0))
864 				csr = MUSB_RXCSR_H_WR_DATATOGGLE
865 					| MUSB_RXCSR_H_DATATOGGLE;
866 			else
867 				csr = 0;
868 			if (qh->type == USB_ENDPOINT_XFER_INT)
869 				csr |= MUSB_RXCSR_DISNYET;
870 
871 		} else {
872 			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
873 
874 			if (csr & (MUSB_RXCSR_RXPKTRDY
875 					| MUSB_RXCSR_DMAENAB
876 					| MUSB_RXCSR_H_REQPKT))
877 				ERR("broken !rx_reinit, ep%d csr %04x\n",
878 						hw_ep->epnum, csr);
879 
880 			/* scrub any stale state, leaving toggle alone */
881 			csr &= MUSB_RXCSR_DISNYET;
882 		}
883 
884 		/* kick things off */
885 
886 		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
887 			/* candidate for DMA */
888 			if (dma_channel) {
889 				dma_channel->actual_len = 0L;
890 				qh->segsize = len;
891 
892 				/* AUTOREQ is in a DMA register */
893 				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
894 				csr = musb_readw(hw_ep->regs,
895 						MUSB_RXCSR);
896 
897 				/* unless caller treats short rx transfers as
898 				 * errors, we dare not queue multiple transfers.
899 				 */
900 				dma_ok = dma_controller->channel_program(
901 						dma_channel, packet_sz,
902 						!(urb->transfer_flags
903 							& URB_SHORT_NOT_OK),
904 						urb->transfer_dma,
905 						qh->segsize);
906 				if (!dma_ok) {
907 					dma_controller->channel_release(
908 							dma_channel);
909 					hw_ep->rx_channel = NULL;
910 					dma_channel = NULL;
911 				} else
912 					csr |= MUSB_RXCSR_DMAENAB;
913 			}
914 		}
915 
916 		csr |= MUSB_RXCSR_H_REQPKT;
917 		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
918 		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
919 		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
920 	}
921 }
922 
923 
924 /*
925  * Service the default endpoint (ep0) as host.
926  * Return true until it's time to start the status stage.
927  */
928 static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
929 {
930 	bool			 more = false;
931 	u8			*fifo_dest = NULL;
932 	u16			fifo_count = 0;
933 	struct musb_hw_ep	*hw_ep = musb->control_ep;
934 	struct musb_qh		*qh = hw_ep->in_qh;
935 	struct usb_ctrlrequest	*request;
936 
937 	switch (musb->ep0_stage) {
938 	case MUSB_EP0_IN:
939 		fifo_dest = urb->transfer_buffer + urb->actual_length;
940 		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
941 				   urb->actual_length);
942 		if (fifo_count < len)
943 			urb->status = -EOVERFLOW;
944 
945 		musb_read_fifo(hw_ep, fifo_count, fifo_dest);
946 
947 		urb->actual_length += fifo_count;
948 		if (len < qh->maxpacket) {
949 			/* always terminate on short read; it's
950 			 * rarely reported as an error.
951 			 */
952 		} else if (urb->actual_length <
953 				urb->transfer_buffer_length)
954 			more = true;
955 		break;
956 	case MUSB_EP0_START:
957 		request = (struct usb_ctrlrequest *) urb->setup_packet;
958 
959 		if (!request->wLength) {
960 			DBG(4, "start no-DATA\n");
961 			break;
962 		} else if (request->bRequestType & USB_DIR_IN) {
963 			DBG(4, "start IN-DATA\n");
964 			musb->ep0_stage = MUSB_EP0_IN;
965 			more = true;
966 			break;
967 		} else {
968 			DBG(4, "start OUT-DATA\n");
969 			musb->ep0_stage = MUSB_EP0_OUT;
970 			more = true;
971 		}
972 		/* FALLTHROUGH */
973 	case MUSB_EP0_OUT:
974 		fifo_count = min_t(size_t, qh->maxpacket,
975 				   urb->transfer_buffer_length -
976 				   urb->actual_length);
977 		if (fifo_count) {
978 			fifo_dest = (u8 *) (urb->transfer_buffer
979 					+ urb->actual_length);
980 			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
981 					fifo_count,
982 					(fifo_count == 1) ? "" : "s",
983 					fifo_dest);
984 			musb_write_fifo(hw_ep, fifo_count, fifo_dest);
985 
986 			urb->actual_length += fifo_count;
987 			more = true;
988 		}
989 		break;
990 	default:
991 		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
992 		break;
993 	}
994 
995 	return more;
996 }
997 
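/* ep0 stage machine, as driven by musb_h_ep0_irq() below:
 * MUSB_EP0_START (SETUP queued) --> MUSB_EP0_IN or MUSB_EP0_OUT (data)
 * --> MUSB_EP0_STATUS --> MUSB_EP0_IDLE once the status stage irq lands.
 */
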
998 /*
999  * Handle default endpoint interrupt as host. Only called in IRQ time
1000  * from musb_interrupt().
1001  *
1002  * called with controller irqlocked
1003  */
1004 irqreturn_t musb_h_ep0_irq(struct musb *musb)
1005 {
1006 	struct urb		*urb;
1007 	u16			csr, len;
1008 	int			status = 0;
1009 	void __iomem		*mbase = musb->mregs;
1010 	struct musb_hw_ep	*hw_ep = musb->control_ep;
1011 	void __iomem		*epio = hw_ep->regs;
1012 	struct musb_qh		*qh = hw_ep->in_qh;
1013 	bool			complete = false;
1014 	irqreturn_t		retval = IRQ_NONE;
1015 
1016 	/* ep0 only has one queue, "in" */
1017 	urb = next_urb(qh);
1018 
1019 	musb_ep_select(mbase, 0);
1020 	csr = musb_readw(epio, MUSB_CSR0);
1021 	len = (csr & MUSB_CSR0_RXPKTRDY)
1022 			? musb_readb(epio, MUSB_COUNT0)
1023 			: 0;
1024 
1025 	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
1026 		csr, qh, len, urb, musb->ep0_stage);
1027 
1028 	/* if we just did status stage, we are done */
1029 	if (MUSB_EP0_STATUS == musb->ep0_stage) {
1030 		retval = IRQ_HANDLED;
1031 		complete = true;
1032 	}
1033 
1034 	/* prepare status */
1035 	if (csr & MUSB_CSR0_H_RXSTALL) {
1036 		DBG(6, "STALLING ENDPOINT\n");
1037 		status = -EPIPE;
1038 
1039 	} else if (csr & MUSB_CSR0_H_ERROR) {
1040 		DBG(2, "no response, csr0 %04x\n", csr);
1041 		status = -EPROTO;
1042 
1043 	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
1044 		DBG(2, "control NAK timeout\n");
1045 
1046 		/* NOTE:  this code path would be a good place to PAUSE a
1047 		 * control transfer, if another one is queued, so that
1048 		 * ep0 is more likely to stay busy.
1049 		 *
1050 		 * if (qh->ring.next != &musb->control), then
1051 		 * we have a candidate... NAKing is *NOT* an error
1052 		 */
1053 		musb_writew(epio, MUSB_CSR0, 0);
1054 		retval = IRQ_HANDLED;
1055 	}
1056 
1057 	if (status) {
1058 		DBG(6, "aborting\n");
1059 		retval = IRQ_HANDLED;
1060 		if (urb)
1061 			urb->status = status;
1062 		complete = true;
1063 
1064 		/* use the proper sequence to abort the transfer */
1065 		if (csr & MUSB_CSR0_H_REQPKT) {
1066 			csr &= ~MUSB_CSR0_H_REQPKT;
1067 			musb_writew(epio, MUSB_CSR0, csr);
1068 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1069 			musb_writew(epio, MUSB_CSR0, csr);
1070 		} else {
1071 			csr |= MUSB_CSR0_FLUSHFIFO;
1072 			musb_writew(epio, MUSB_CSR0, csr);
1073 			musb_writew(epio, MUSB_CSR0, csr);
1074 			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
1075 			musb_writew(epio, MUSB_CSR0, csr);
1076 		}
1077 
1078 		musb_writeb(epio, MUSB_NAKLIMIT0, 0);
1079 
1080 		/* clear it */
1081 		musb_writew(epio, MUSB_CSR0, 0);
1082 	}
1083 
1084 	if (unlikely(!urb)) {
1085 		/* stop endpoint since we have no place for its data, this
1086 		 * SHOULD NEVER HAPPEN! */
1087 		ERR("no URB for end 0\n");
1088 
1089 		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
1090 		musb_writew(epio, MUSB_CSR0, MUSB_CSR0_FLUSHFIFO);
1091 		musb_writew(epio, MUSB_CSR0, 0);
1092 
1093 		goto done;
1094 	}
1095 
1096 	if (!complete) {
1097 		/* call common logic and prepare response */
1098 		if (musb_h_ep0_continue(musb, len, urb)) {
1099 			/* more packets required */
1100 			csr = (MUSB_EP0_IN == musb->ep0_stage)
1101 				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
1102 		} else {
1103 			/* data transfer complete; perform status phase */
1104 			if (usb_pipeout(urb->pipe)
1105 					|| !urb->transfer_buffer_length)
1106 				csr = MUSB_CSR0_H_STATUSPKT
1107 					| MUSB_CSR0_H_REQPKT;
1108 			else
1109 				csr = MUSB_CSR0_H_STATUSPKT
1110 					| MUSB_CSR0_TXPKTRDY;
1111 
1112 			/* flag status stage */
1113 			musb->ep0_stage = MUSB_EP0_STATUS;
1114 
1115 			DBG(5, "ep0 STATUS, csr %04x\n", csr);
1116 
1117 		}
1118 		musb_writew(epio, MUSB_CSR0, csr);
1119 		retval = IRQ_HANDLED;
1120 	} else
1121 		musb->ep0_stage = MUSB_EP0_IDLE;
1122 
1123 	/* call completion handler if done */
1124 	if (complete)
1125 		musb_advance_schedule(musb, urb, hw_ep, 1);
1126 done:
1127 	return retval;
1128 }
1129 
1130 
1131 #ifdef CONFIG_USB_INVENTRA_DMA
1132 
1133 /* Host side TX (OUT) using Mentor DMA works as follows:
1134 	submit_urb ->
1135 		- if queue was empty, Program Endpoint
1136 		- ... which starts DMA to fifo in mode 1 or 0
1137 
1138 	DMA Isr (transfer complete) -> TxAvail()
1139 		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
1140 					only in musb_cleanup_urb)
1141 		- TxPktRdy has to be set in mode 0 or for
1142 			short packets in mode 1.
1143 */
1144 
1145 #endif
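/* Rough consequence for the code below (assuming Mentor DMA, per the
 * note above): musb_ep_program() picks mode 0 when the whole segment
 * fits in one maxpacket and mode 1 otherwise, and software must still
 * set TxPktRdy itself in mode 0, or for a short final packet in mode 1.
 */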
1146 
1147 /* Service a Tx-Available or dma completion irq for the endpoint */
1148 void musb_host_tx(struct musb *musb, u8 epnum)
1149 {
1150 	int			pipe;
1151 	bool			done = false;
1152 	u16			tx_csr;
1153 	size_t			wLength = 0;
1154 	u8			*buf = NULL;
1155 	struct urb		*urb;
1156 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1157 	void __iomem		*epio = hw_ep->regs;
1158 	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
1159 							    : hw_ep->out_qh;
1160 	u32			status = 0;
1161 	void __iomem		*mbase = musb->mregs;
1162 	struct dma_channel	*dma;
1163 
1164 	urb = next_urb(qh);
1165 
1166 	musb_ep_select(mbase, epnum);
1167 	tx_csr = musb_readw(epio, MUSB_TXCSR);
1168 
1169 	/* with CPPI, DMA sometimes triggers "extra" irqs */
1170 	if (!urb) {
1171 		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1172 		goto finish;
1173 	}
1174 
1175 	pipe = urb->pipe;
1176 	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
1177 	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
1178 			dma ? ", dma" : "");
1179 
1180 	/* check for errors */
1181 	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
1182 		/* dma was disabled, fifo flushed */
1183 		DBG(3, "TX end %d stall\n", epnum);
1184 
1185 		/* stall; record URB status */
1186 		status = -EPIPE;
1187 
1188 	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
1189 		/* (NON-ISO) dma was disabled, fifo flushed */
1190 		DBG(3, "TX 3strikes on ep=%d\n", epnum);
1191 
1192 		status = -ETIMEDOUT;
1193 
1194 	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
1195 		DBG(6, "TX end=%d device not responding\n", epnum);
1196 
1197 		/* NOTE:  this code path would be a good place to PAUSE a
1198 		 * transfer, if there's some other (nonperiodic) tx urb
1199 		 * that could use this fifo.  (dma complicates it...)
1200 		 *
1201 		 * if (bulk && qh->ring.next != &musb->out_bulk), then
1202 		 * we have a candidate... NAKing is *NOT* an error
1203 		 */
1204 		musb_ep_select(mbase, epnum);
1205 		musb_writew(epio, MUSB_TXCSR,
1206 				MUSB_TXCSR_H_WZC_BITS
1207 				| MUSB_TXCSR_TXPKTRDY);
1208 		goto finish;
1209 	}
1210 
1211 	if (status) {
1212 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1213 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1214 			(void) musb->dma_controller->channel_abort(dma);
1215 		}
1216 
1217 		/* do the proper sequence to abort the transfer in the
1218 		 * usb core; the dma engine should already be stopped.
1219 		 */
1220 		musb_h_tx_flush_fifo(hw_ep);
1221 		tx_csr &= ~(MUSB_TXCSR_AUTOSET
1222 				| MUSB_TXCSR_DMAENAB
1223 				| MUSB_TXCSR_H_ERROR
1224 				| MUSB_TXCSR_H_RXSTALL
1225 				| MUSB_TXCSR_H_NAKTIMEOUT
1226 				);
1227 
1228 		musb_ep_select(mbase, epnum);
1229 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1230 		/* REVISIT may need to clear FLUSHFIFO ... */
1231 		musb_writew(epio, MUSB_TXCSR, tx_csr);
1232 		musb_writeb(epio, MUSB_TXINTERVAL, 0);
1233 
1234 		done = true;
1235 	}
1236 
1237 	/* second cppi case */
1238 	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1239 		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
1240 		goto finish;
1241 
1242 	}
1243 
1244 	/* REVISIT this looks wrong... */
1245 	if (!status || dma || usb_pipeisoc(pipe)) {
1246 		if (dma)
1247 			wLength = dma->actual_len;
1248 		else
1249 			wLength = qh->segsize;
1250 		qh->offset += wLength;
1251 
1252 		if (usb_pipeisoc(pipe)) {
1253 			struct usb_iso_packet_descriptor	*d;
1254 
1255 			d = urb->iso_frame_desc + qh->iso_idx;
1256 			d->actual_length = qh->segsize;
1257 			if (++qh->iso_idx >= urb->number_of_packets) {
1258 				done = true;
1259 			} else {
1260 				d++;
1261 				buf = urb->transfer_buffer + d->offset;
1262 				wLength = d->length;
1263 			}
1264 		} else if (dma) {
1265 			done = true;
1266 		} else {
1267 			/* see if we need to send more data, or ZLP */
1268 			if (qh->segsize < qh->maxpacket)
1269 				done = true;
1270 			else if (qh->offset == urb->transfer_buffer_length
1271 					&& !(urb->transfer_flags
1272 						& URB_ZERO_PACKET))
1273 				done = true;
1274 			if (!done) {
1275 				buf = urb->transfer_buffer
1276 						+ qh->offset;
1277 				wLength = urb->transfer_buffer_length
1278 						- qh->offset;
1279 			}
1280 		}
1281 	}
1282 
1283 	/* urb->status != -EINPROGRESS means request has been faulted,
1284 	 * so we must abort this transfer after cleanup
1285 	 */
1286 	if (urb->status != -EINPROGRESS) {
1287 		done = true;
1288 		if (status == 0)
1289 			status = urb->status;
1290 	}
1291 
1292 	if (done) {
1293 		/* set status */
1294 		urb->status = status;
1295 		urb->actual_length = qh->offset;
1296 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
1297 
1298 	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
1299 		/* WARN_ON(!buf); */
1300 
1301 		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
1302 		 * (and presumably, fifo is not half-full) we should write TWO
1303 		 * packets before updating TXCSR ... other docs disagree ...
1304 		 */
1305 		/* PIO:  start next packet in this URB */
1306 		if (wLength > qh->maxpacket)
1307 			wLength = qh->maxpacket;
1308 		musb_write_fifo(hw_ep, wLength, buf);
1309 		qh->segsize = wLength;
1310 
1311 		musb_ep_select(mbase, epnum);
1312 		musb_writew(epio, MUSB_TXCSR,
1313 				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
1314 	} else
1315 		DBG(1, "not complete, but dma enabled?\n");
1316 
1317 finish:
1318 	return;
1319 }
1320 
1321 
1322 #ifdef CONFIG_USB_INVENTRA_DMA
1323 
1324 /* Host side RX (IN) using Mentor DMA works as follows:
1325 	submit_urb ->
1326 		- if queue was empty, ProgramEndpoint
1327 		- first IN token is sent out (by setting ReqPkt)
1328 	LinuxIsr -> RxReady()
1329 	/\	=> first packet is received
1330 	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
1331 	|		-> DMA Isr (transfer complete) -> RxReady()
1332 	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
1333 	|		    - if urb not complete, send next IN token (ReqPkt)
1334 	|			   |		else complete urb.
1335 	|			   |
1336 	---------------------------
1337  *
1338  * Nuances of mode 1:
1339  *	For short packets, no ack (+RxPktRdy) is sent automatically
1340  *	(even if AutoClear is ON)
1341  *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
1342  *	automatically => major problem, as collecting the next packet becomes
1343  *	difficult. Hence mode 1 is not used.
1344  *
1345  * REVISIT
1346  *	All we care about at this driver level is that
1347  *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
1348  *       (b) termination conditions are: short RX, or buffer full;
1349  *       (c) fault modes include
1350  *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
1351  *             (and that endpoint's dma queue stops immediately)
1352  *           - overflow (full, PLUS more bytes in the terminal packet)
1353  *
1354  *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
1355  *	thus be a great candidate for using mode 1 ... for all but the
1356  *	last packet of one URB's transfer.
1357  */
1358 
1359 #endif
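/* For the RX path below this means (again assuming Mentor DMA): one
 * channel_program() per packet in mode 0, with the next IN token
 * (ReqPkt) re-armed by hand after each completion; mode 1 is left to
 * the experimental USE_MODE1 block for the reasons given above.
 */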
1360 
1361 /*
1362  * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
1363  * and high-bandwidth IN transfer cases.
1364  */
1365 void musb_host_rx(struct musb *musb, u8 epnum)
1366 {
1367 	struct urb		*urb;
1368 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
1369 	void __iomem		*epio = hw_ep->regs;
1370 	struct musb_qh		*qh = hw_ep->in_qh;
1371 	size_t			xfer_len;
1372 	void __iomem		*mbase = musb->mregs;
1373 	int			pipe;
1374 	u16			rx_csr, val;
1375 	bool			iso_err = false;
1376 	bool			done = false;
1377 	u32			status;
1378 	struct dma_channel	*dma;
1379 
1380 	musb_ep_select(mbase, epnum);
1381 
1382 	urb = next_urb(qh);
1383 	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
1384 	status = 0;
1385 	xfer_len = 0;
1386 
1387 	rx_csr = musb_readw(epio, MUSB_RXCSR);
1388 	val = rx_csr;
1389 
1390 	if (unlikely(!urb)) {
1391 		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
1392 		 * usbtest #11 (unlinks) triggers it regularly, sometimes
1393 		 * with fifo full.  (Only with DMA??)
1394 		 */
1395 		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
1396 			musb_readw(epio, MUSB_RXCOUNT));
1397 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1398 		return;
1399 	}
1400 
1401 	pipe = urb->pipe;
1402 
1403 	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
1404 		epnum, rx_csr, urb->actual_length,
1405 		dma ? dma->actual_len : 0);
1406 
1407 	/* check for errors, concurrent stall & unlink is not really
1408 	 * handled yet! */
1409 	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
1410 		DBG(3, "RX end %d STALL\n", epnum);
1411 
1412 		/* stall; record URB status */
1413 		status = -EPIPE;
1414 
1415 	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
1416 		DBG(3, "end %d RX proto error\n", epnum);
1417 
1418 		status = -EPROTO;
1419 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1420 
1421 	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {
1422 
1423 		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
1424 			/* NOTE this code path would be a good place to PAUSE a
1425 			 * transfer, if there's some other (nonperiodic) rx urb
1426 			 * that could use this fifo.  (dma complicates it...)
1427 			 *
1428 			 * if (bulk && qh->ring.next != &musb->in_bulk), then
1429 			 * we have a candidate... NAKing is *NOT* an error
1430 			 */
1431 			DBG(6, "RX end %d NAK timeout\n", epnum);
1432 			musb_ep_select(mbase, epnum);
1433 			musb_writew(epio, MUSB_RXCSR,
1434 					MUSB_RXCSR_H_WZC_BITS
1435 					| MUSB_RXCSR_H_REQPKT);
1436 
1437 			goto finish;
1438 		} else {
1439 			DBG(4, "RX end %d ISO data error\n", epnum);
1440 			/* packet error reported later */
1441 			iso_err = true;
1442 		}
1443 	}
1444 
1445 	/* faults abort the transfer */
1446 	if (status) {
1447 		/* clean up dma and collect transfer count */
1448 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1449 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1450 			(void) musb->dma_controller->channel_abort(dma);
1451 			xfer_len = dma->actual_len;
1452 		}
1453 		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
1454 		musb_writeb(epio, MUSB_RXINTERVAL, 0);
1455 		done = true;
1456 		goto finish;
1457 	}
1458 
1459 	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
1460 		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
1461 		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
1462 		goto finish;
1463 	}
1464 
1465 	/* thorough shutdown for now ... given more precise fault handling
1466 	 * and better queueing support, we might keep a DMA pipeline going
1467 	 * while processing this irq for earlier completions.
1468 	 */
1469 
1470 	/* FIXME this is _way_ too much in-line logic for Mentor DMA */
1471 
1472 #ifndef CONFIG_USB_INVENTRA_DMA
1473 	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
1474 		/* REVISIT this happened for a while on some short reads...
1475 		 * the cleanup still needs investigation... looks bad...
1476 		 * and also duplicates dma cleanup code above ... plus,
1477 		 * shouldn't this be the "half full" double buffer case?
1478 		 */
1479 		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
1480 			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
1481 			(void) musb->dma_controller->channel_abort(dma);
1482 			xfer_len = dma->actual_len;
1483 			done = true;
1484 		}
1485 
1486 		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
1487 				xfer_len, dma ? ", dma" : "");
1488 		rx_csr &= ~MUSB_RXCSR_H_REQPKT;
1489 
1490 		musb_ep_select(mbase, epnum);
1491 		musb_writew(epio, MUSB_RXCSR,
1492 				MUSB_RXCSR_H_WZC_BITS | rx_csr);
1493 	}
1494 #endif
1495 	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
1496 		xfer_len = dma->actual_len;
1497 
1498 		val &= ~(MUSB_RXCSR_DMAENAB
1499 			| MUSB_RXCSR_H_AUTOREQ
1500 			| MUSB_RXCSR_AUTOCLEAR
1501 			| MUSB_RXCSR_RXPKTRDY);
1502 		musb_writew(hw_ep->regs, MUSB_RXCSR, val);
1503 
1504 #ifdef CONFIG_USB_INVENTRA_DMA
1505 		if (usb_pipeisoc(pipe)) {
1506 			struct usb_iso_packet_descriptor *d;
1507 
1508 			d = urb->iso_frame_desc + qh->iso_idx;
1509 			d->actual_length = xfer_len;
1510 
1511 			/* even if there was an error, we did the dma
1512 			 * for iso_frame_desc->length
1513 			 */
1514 			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
1515 				d->status = 0;
1516 
1517 			if (++qh->iso_idx >= urb->number_of_packets)
1518 				done = true;
1519 			else
1520 				done = false;
1521 
1522 		} else  {
1523 		/* done if urb buffer is full or short packet is recd */
1524 		done = (urb->actual_length + xfer_len >=
1525 				urb->transfer_buffer_length
1526 			|| dma->actual_len < qh->maxpacket);
1527 		}
1528 
1529 		/* send IN token for next packet, without AUTOREQ */
1530 		if (!done) {
1531 			val |= MUSB_RXCSR_H_REQPKT;
1532 			musb_writew(epio, MUSB_RXCSR,
1533 				MUSB_RXCSR_H_WZC_BITS | val);
1534 		}
1535 
1536 		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
1537 			done ? "off" : "reset",
1538 			musb_readw(epio, MUSB_RXCSR),
1539 			musb_readw(epio, MUSB_RXCOUNT));
1540 #else
1541 		done = true;
1542 #endif
1543 	} else if (urb->status == -EINPROGRESS) {
1544 		/* if no errors, be sure a packet is ready for unloading */
1545 		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
1546 			status = -EPROTO;
1547 			ERR("Rx interrupt with no errors or packet!\n");
1548 
1549 			/* FIXME this is another "SHOULD NEVER HAPPEN" */
1550 
1551 /* SCRUB (RX) */
1552 			/* do the proper sequence to abort the transfer */
1553 			musb_ep_select(mbase, epnum);
1554 			val &= ~MUSB_RXCSR_H_REQPKT;
1555 			musb_writew(epio, MUSB_RXCSR, val);
1556 			goto finish;
1557 		}
1558 
1559 		/* we are expecting IN packets */
1560 #ifdef CONFIG_USB_INVENTRA_DMA
1561 		if (dma) {
1562 			struct dma_controller	*c;
1563 			u16			rx_count;
1564 			int			ret, length;
1565 			dma_addr_t		buf;
1566 
1567 			rx_count = musb_readw(epio, MUSB_RXCOUNT);
1568 
1569 			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
1570 					epnum, rx_count,
1571 					urb->transfer_dma
1572 						+ urb->actual_length,
1573 					qh->offset,
1574 					urb->transfer_buffer_length);
1575 
1576 			c = musb->dma_controller;
1577 
1578 			if (usb_pipeisoc(pipe)) {
1579 				int status = 0;
1580 				struct usb_iso_packet_descriptor *d;
1581 
1582 				d = urb->iso_frame_desc + qh->iso_idx;
1583 
1584 				if (iso_err) {
1585 					status = -EILSEQ;
1586 					urb->error_count++;
1587 				}
1588 				if (rx_count > d->length) {
1589 					if (status == 0) {
1590 						status = -EOVERFLOW;
1591 						urb->error_count++;
1592 					}
1593 					DBG(2, "** OVERFLOW %d into %d\n",
1594 					    rx_count, d->length);
1595 
1596 					length = d->length;
1597 				} else
1598 					length = rx_count;
1599 				d->status = status;
1600 				buf = urb->transfer_dma + d->offset;
1601 			} else {
1602 				length = rx_count;
1603 				buf = urb->transfer_dma +
1604 						urb->actual_length;
1605 			}
1606 
1607 			dma->desired_mode = 0;
1608 #ifdef USE_MODE1
1609 			/* because of the issue below, mode 1 will
1610 			 * only rarely behave with correct semantics.
1611 			 */
1612 			if ((urb->transfer_flags &
1613 						URB_SHORT_NOT_OK)
1614 				&& (urb->transfer_buffer_length -
1615 						urb->actual_length)
1616 					> qh->maxpacket)
1617 				dma->desired_mode = 1;
1618 			if (rx_count < hw_ep->max_packet_sz_rx) {
1619 				length = rx_count;
1620 				dma->desired_mode = 0;
1621 			} else {
1622 				length = urb->transfer_buffer_length;
1623 			}
1624 #endif
1625 
1626 /* Disadvantage of using mode 1:
1627  *	It's basically usable only for mass storage class; essentially all
1628  *	other protocols also terminate transfers on short packets.
1629  *
1630  * Details:
1631  *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
1632  *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
1633  *	to use the extra IN token to grab the last packet using mode 0, then
1634  *	the problem is that you cannot be sure when the device will send the
1635  *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
1636  *	such that it gets lost when RxCSR is re-set at the end of the mode 1
1637  *	transfer, while sometimes it is recd just a little late so that if you
1638  *	try to configure for mode 0 soon after the mode 1 transfer is
1639  *	completed, you will find rxcount 0. Okay, so you might think why not
1640  *	wait for an interrupt when the pkt is recd. Well, you won't get any!
1641  */
1642 
1643 			val = musb_readw(epio, MUSB_RXCSR);
1644 			val &= ~MUSB_RXCSR_H_REQPKT;
1645 
1646 			if (dma->desired_mode == 0)
1647 				val &= ~MUSB_RXCSR_H_AUTOREQ;
1648 			else
1649 				val |= MUSB_RXCSR_H_AUTOREQ;
1650 			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;
1651 
1652 			musb_writew(epio, MUSB_RXCSR,
1653 				MUSB_RXCSR_H_WZC_BITS | val);
1654 
1655 			/* REVISIT if when actual_length != 0,
1656 			 * transfer_buffer_length needs to be
1657 			 * adjusted first...
1658 			 */
1659 			ret = c->channel_program(
1660 				dma, qh->maxpacket,
1661 				dma->desired_mode, buf, length);
1662 
1663 			if (!ret) {
1664 				c->channel_release(dma);
1665 				hw_ep->rx_channel = NULL;
1666 				dma = NULL;
1667 				/* REVISIT reset CSR */
1668 			}
1669 		}
1670 #endif	/* Mentor DMA */
1671 
1672 		if (!dma) {
1673 			done = musb_host_packet_rx(musb, urb,
1674 					epnum, iso_err);
1675 			DBG(6, "read %spacket\n", done ? "last " : "");
1676 		}
1677 	}
1678 
1679 finish:
1680 	urb->actual_length += xfer_len;
1681 	qh->offset += xfer_len;
1682 	if (done) {
1683 		if (urb->status == -EINPROGRESS)
1684 			urb->status = status;
1685 		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
1686 	}
1687 }
1688 
1689 /* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
1690  * the software schedule associates multiple such nodes with a given
1691  * host side hardware endpoint + direction; scheduling may activate
1692  * that hardware endpoint.
1693  */
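/* The best-fit search below keeps the hardware endpoint whose fifo is
 * the smallest one still able to hold qh->maxpacket (smallest
 * non-negative difference), so small periodic pipes don't tie up the
 * large fifos.
 */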
1694 static int musb_schedule(
1695 	struct musb		*musb,
1696 	struct musb_qh		*qh,
1697 	int			is_in)
1698 {
1699 	int			idle;
1700 	int			best_diff;
1701 	int			best_end, epnum;
1702 	struct musb_hw_ep	*hw_ep = NULL;
1703 	struct list_head	*head = NULL;
1704 
1705 	/* use fixed hardware for control and bulk */
1706 	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
1707 		head = &musb->control;
1708 		hw_ep = musb->control_ep;
1709 		goto success;
1710 	}
1711 
1712 	/* else, periodic transfers get muxed to other endpoints */
1713 
1714 	/* FIXME this doesn't consider direction, so it can only
1715 	 * work for one half of the endpoint hardware, and assumes
1716 	 * the previous cases handled all non-shared endpoints...
1717 	 */
1718 
1719 	/* we know this qh hasn't been scheduled, so all we need to do
1720 	 * is choose which hardware endpoint to put it on ...
1721 	 *
1722 	 * REVISIT what we really want here is a regular schedule tree
1723 	 * like e.g. OHCI uses, but for now musb->periodic is just an
1724 	 * array of the _single_ logical endpoint associated with a
1725 	 * given physical one (identity mapping logical->physical).
1726 	 *
1727 	 * that simplistic approach makes TT scheduling a lot simpler;
1728 	 * there is none, and thus none of its complexity...
1729 	 */
1730 	best_diff = 4096;
1731 	best_end = -1;
1732 
1733 	for (epnum = 1; epnum < musb->nr_endpoints; epnum++) {
1734 		int	diff;
1735 
1736 		if (musb->periodic[epnum])
1737 			continue;
1738 		hw_ep = &musb->endpoints[epnum];
1739 		if (hw_ep == musb->bulk_ep)
1740 			continue;
1741 
1742 		if (is_in)
1743 			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
1744 		else
1745 			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;
1746 
1747 		if (diff >= 0 && best_diff > diff) {
1748 			best_diff = diff;
1749 			best_end = epnum;
1750 		}
1751 	}
1752 	/* use bulk reserved ep1 if no other ep is free */
1753 	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
1754 		hw_ep = musb->bulk_ep;
1755 		if (is_in)
1756 			head = &musb->in_bulk;
1757 		else
1758 			head = &musb->out_bulk;
1759 		goto success;
1760 	} else if (best_end < 0) {
1761 		return -ENOSPC;
1762 	}
1763 
1764 	idle = 1;
1765 	qh->mux = 0;
1766 	hw_ep = musb->endpoints + best_end;
1767 	musb->periodic[best_end] = qh;
1768 	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
1769 success:
1770 	if (head) {
1771 		idle = list_empty(head);
1772 		list_add_tail(&qh->ring, head);
1773 		qh->mux = 1;
1774 	}
1775 	qh->hw_ep = hw_ep;
1776 	qh->hep->hcpriv = qh;
1777 	if (idle)
1778 		musb_start_urb(musb, is_in, qh);
1779 	return 0;
1780 }
1781 
1782 static int musb_urb_enqueue(
1783 	struct usb_hcd			*hcd,
1784 	struct urb			*urb,
1785 	gfp_t				mem_flags)
1786 {
1787 	unsigned long			flags;
1788 	struct musb			*musb = hcd_to_musb(hcd);
1789 	struct usb_host_endpoint	*hep = urb->ep;
1790 	struct musb_qh			*qh = hep->hcpriv;
1791 	struct usb_endpoint_descriptor	*epd = &hep->desc;
1792 	int				ret;
1793 	unsigned			type_reg;
1794 	unsigned			interval;
1795 
1796 	/* host role must be active */
1797 	if (!is_host_active(musb) || !musb->is_active)
1798 		return -ENODEV;
1799 
1800 	spin_lock_irqsave(&musb->lock, flags);
1801 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
1802 	spin_unlock_irqrestore(&musb->lock, flags);
1803 	if (ret)
1804 		return ret;
1805 
1806 	/* DMA mapping was already done, if needed, and this urb is on
1807 	 * hep->urb_list ... so there's little to do unless hep wasn't
1808 	 * yet scheduled onto a live qh.
1809 	 *
1810 	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
1811 	 * disabled, testing for empty qh->ring and avoiding qh setup costs
1812 	 * except for the first urb queued after a config change.
1813 	 */
1814 	if (qh) {
1815 		urb->hcpriv = qh;
1816 		return 0;
1817 	}
1818 
1819 	/* Allocate and initialize qh, minimizing the work done each time
1820 	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
1821 	 *
1822 	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
1823 	 * for bugs in other kernel code to break this driver...
1824 	 */
1825 	qh = kzalloc(sizeof *qh, mem_flags);
1826 	if (!qh) {
1827 		spin_lock_irqsave(&musb->lock, flags);
1828 		usb_hcd_unlink_urb_from_ep(hcd, urb);
1829 		spin_unlock_irqrestore(&musb->lock, flags);
1830 		return -ENOMEM;
1831 	}
1832 
1833 	qh->hep = hep;
1834 	qh->dev = urb->dev;
1835 	INIT_LIST_HEAD(&qh->ring);
1836 	qh->is_ready = 1;
1837 
1838 	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
1839 
1840 	/* no high bandwidth support yet */
1841 	if (qh->maxpacket & ~0x7ff) {
1842 		ret = -EMSGSIZE;
1843 		goto done;
1844 	}
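	/* (Illustrative note: per the USB 2.0 spec, wMaxPacketSize bits 10:0
	 * hold the packet size and bits 12:11 encode extra high-bandwidth
	 * transactions per microframe, which is what the ~0x7ff test above
	 * rejects.)
	 */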
1845 
1846 	qh->epnum = usb_endpoint_num(epd);
1847 	qh->type = usb_endpoint_type(epd);
1848 
1849 	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
1850 	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);
1851 
1852 	/* precompute rxtype/txtype/type0 register */
1853 	type_reg = (qh->type << 4) | qh->epnum;
1854 	switch (urb->dev->speed) {
1855 	case USB_SPEED_LOW:
1856 		type_reg |= 0xc0;
1857 		break;
1858 	case USB_SPEED_FULL:
1859 		type_reg |= 0x80;
1860 		break;
1861 	default:
1862 		type_reg |= 0x40;
1863 	}
1864 	qh->type_reg = type_reg;
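	/* Example of the value built above (layout inferred from the
	 * constants used here): bits 7:6 = speed (01 high, 10 full, 11 low),
	 * bits 5:4 = transfer type, bits 3:0 = target endpoint number; so a
	 * full-speed bulk endpoint 2 yields 0x80 | (2 << 4) | 2 = 0xa2.
	 */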
1865 
1866 	/* Precompute RXINTERVAL/TXINTERVAL register */
1867 	switch (qh->type) {
1868 	case USB_ENDPOINT_XFER_INT:
1869 		/*
1870 		 * Full/low speeds use the  linear encoding,
1871 		 * Full/low speeds use the linear encoding,
1872 		 */
1873 		if (urb->dev->speed <= USB_SPEED_FULL) {
1874 			interval = max_t(u8, epd->bInterval, 1);
1875 			break;
1876 		}
1877 		/* FALLTHROUGH */
1878 	case USB_ENDPOINT_XFER_ISOC:
1879 		/* ISO always uses logarithmic encoding */
1880 		interval = min_t(u8, epd->bInterval, 16);
1881 		break;
1882 	default:
1883 		/* REVISIT we actually want to use NAK limits, hinting to the
1884 		 * transfer scheduling logic to try some other qh, e.g. try
1885 		 * for 2 msec first:
1886 		 *
1887 		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
1888 		 *
1889 		 * The downside of disabling this is that transfer scheduling
1890 		 * gets VERY unfair for nonperiodic transfers; a misbehaving
1891 		 * peripheral could make that hurt; so could a perfectly
1892 		 * normal one, for reads:  network and other drivers keep
1893 		 * reads posted at all times, and having one pending for a
1894 		 * week should be perfectly safe.
1895 		 *
1896 		 * The upside of disabling it is that it lets us set the
1897 		 * transfer scheduling code aside for a while.
1898 		 */
1899 		interval = 0;
1900 	}
1901 	qh->intv_reg = interval;
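	/* Worked example (illustrative): a full-speed interrupt endpoint with
	 * bInterval = 4 is polled every 4 frames (4 ms), while a high-speed
	 * interrupt or ISO endpoint with bInterval = 4 means 2^(4-1) = 8
	 * microframes, i.e. 1 ms.
	 */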
1902 
1903 	/* precompute addressing for external hub/tt ports */
1904 	if (musb->is_multipoint) {
1905 		struct usb_device	*parent = urb->dev->parent;
1906 
1907 		if (parent != hcd->self.root_hub) {
1908 			qh->h_addr_reg = (u8) parent->devnum;
1909 
1910 			/* set up tt info if needed */
1911 			if (urb->dev->tt) {
1912 				qh->h_port_reg = (u8) urb->dev->ttport;
1913 				if (urb->dev->tt->hub)
1914 					qh->h_addr_reg =
1915 						(u8) urb->dev->tt->hub->devnum;
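				/* flag hubs that have a TT per port */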
1916 				if (urb->dev->tt->multi)
1917 					qh->h_addr_reg |= 0x80;
1918 			}
1919 		}
1920 	}
1921 
1922 	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
1923 	 * until we get real dma queues (with an entry for each urb/buffer),
1924 	 * we only have work to do in the former case.
1925 	 */
1926 	spin_lock_irqsave(&musb->lock, flags);
1927 	if (hep->hcpriv) {
1928 		/* some concurrent activity submitted another urb to hep...
1929 		 * odd, rare, error prone, but legal.
1930 		 */
1931 		kfree(qh);
		qh = NULL;	/* don't let urb->hcpriv point at freed memory below */
1932 		ret = 0;
1933 	} else
1934 		ret = musb_schedule(musb, qh,
1935 				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);
1936 
1937 	if (ret == 0) {
1938 		urb->hcpriv = qh;
1939 		/* FIXME set urb->start_frame for iso/intr, it's tested in
1940 		 * musb_start_urb(), but otherwise only konicawc cares ...
1941 		 */
1942 	}
1943 	spin_unlock_irqrestore(&musb->lock, flags);
1944 
1945 done:
1946 	if (ret != 0) {
1947 		spin_lock_irqsave(&musb->lock, flags);
1948 		usb_hcd_unlink_urb_from_ep(hcd, urb);
1949 		spin_unlock_irqrestore(&musb->lock, flags);
1950 		kfree(qh);
1951 	}
1952 	return ret;
1953 }
1954 
1955 
1956 /*
1957  * abort a transfer that's at the head of a hardware queue.
1958  * called with controller locked, irqs blocked;
1959  * that hardware queue advances to the next transfer, unless prevented
1960  */
1961 static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
1962 {
1963 	struct musb_hw_ep	*ep = qh->hw_ep;
1964 	void __iomem		*epio = ep->regs;
1965 	unsigned		hw_end = ep->epnum;
1966 	void __iomem		*regs = ep->musb->mregs;
1967 	u16			csr;
1968 	int			status = 0;
1969 
1970 	musb_ep_select(regs, hw_end);
1971 
1972 	if (is_dma_capable()) {
1973 		struct dma_channel	*dma;
1974 
1975 		dma = is_in ? ep->rx_channel : ep->tx_channel;
1976 		if (dma) {
1977 			status = ep->musb->dma_controller->channel_abort(dma);
1978 			DBG(status ? 1 : 3,
1979 				"abort %cX%d DMA for urb %p --> %d\n",
1980 				is_in ? 'R' : 'T', ep->epnum,
1981 				urb, status);
1982 			urb->actual_length += dma->actual_len;
1983 		}
1984 	}
1985 
1986 	/* turn off DMA requests, discard state, stop polling ... */
1987 	if (is_in) {
1988 		/* giveback saves bulk toggle */
1989 		csr = musb_h_flush_rxfifo(ep, 0);
1990 
1991 		/* REVISIT we still get an irq; should likely clear the
1992 		 * endpoint's irq status here to avoid bogus irqs.
1993 		 * clearing that status is platform-specific...
1994 		 */
1995 	} else {
1996 		musb_h_tx_flush_fifo(ep);
1997 		csr = musb_readw(epio, MUSB_TXCSR);
1998 		csr &= ~(MUSB_TXCSR_AUTOSET
1999 			| MUSB_TXCSR_DMAENAB
2000 			| MUSB_TXCSR_H_RXSTALL
2001 			| MUSB_TXCSR_H_NAKTIMEOUT
2002 			| MUSB_TXCSR_H_ERROR
2003 			| MUSB_TXCSR_TXPKTRDY);
2004 		musb_writew(epio, MUSB_TXCSR, csr);
2005 		/* REVISIT may need to clear FLUSHFIFO ... */
2006 		musb_writew(epio, MUSB_TXCSR, csr);
2007 		/* flush cpu writebuffer */
2008 		csr = musb_readw(epio, MUSB_TXCSR);
2009 	}
2010 	if (status == 0)
2011 		musb_advance_schedule(ep->musb, urb, ep, is_in);
2012 	return status;
2013 }
2014 
2015 static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
2016 {
2017 	struct musb		*musb = hcd_to_musb(hcd);
2018 	struct musb_qh		*qh;
2019 	struct list_head	*sched;
2020 	unsigned long		flags;
2021 	int			ret;
2022 
2023 	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
2024 			usb_pipedevice(urb->pipe),
2025 			usb_pipeendpoint(urb->pipe),
2026 			usb_pipein(urb->pipe) ? "in" : "out");
2027 
2028 	spin_lock_irqsave(&musb->lock, flags);
2029 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
2030 	if (ret)
2031 		goto done;
2032 
2033 	qh = urb->hcpriv;
2034 	if (!qh)
2035 		goto done;
2036 
2037 	/* Any URB not actively programmed into endpoint hardware can be
2038 	 * immediately given back; that's any URB not at the head of an
2039 	 * endpoint queue, unless someday we get real DMA queues.  And even
2040 	 * if it's at the head, it might not be known to the hardware...
2041 	 *
2042 	 * Otherwise abort current transfer, pending dma, etc.; urb->status
2043 	 * has already been updated.  This is a synchronous abort; it'd be
2044 	 * OK to hold off until after some IRQ, though.
2045 	 */
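	/* urb_list.prev pointing back at the list head means this urb is
	 * first in hep->urb_list, i.e. the one the hardware may be running.
	 */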
2046 	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
2047 		ret = -EINPROGRESS;
2048 	else {
2049 		switch (qh->type) {
2050 		case USB_ENDPOINT_XFER_CONTROL:
2051 			sched = &musb->control;
2052 			break;
2053 		case USB_ENDPOINT_XFER_BULK:
2054 			if (qh->mux == 1) {
2055 				if (usb_pipein(urb->pipe))
2056 					sched = &musb->in_bulk;
2057 				else
2058 					sched = &musb->out_bulk;
2059 				break;
2060 			}
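			/* non-muxed bulk falls through to the
			 * singleton-queue handling below
			 */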
2061 		default:
2062 			/* REVISIT when we get a schedule tree, periodic
2063 			 * transfers won't always be at the head of a
2064 			 * singleton queue...
2065 			 */
2066 			sched = NULL;
2067 			break;
2068 		}
2069 	}
2070 
2071 	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
2072 	if (ret < 0 || (sched && qh != first_qh(sched))) {
2073 		int	ready = qh->is_ready;
2074 
2075 		ret = 0;
2076 		qh->is_ready = 0;
2077 		__musb_giveback(musb, urb, 0);
2078 		qh->is_ready = ready;
2079 
2080 		/* If nothing else (usually musb_giveback) is using it
2081 		 * and its URB list has emptied, recycle this qh.
2082 		 */
2083 		if (ready && list_empty(&qh->hep->urb_list)) {
2084 			qh->hep->hcpriv = NULL;
2085 			list_del(&qh->ring);
2086 			kfree(qh);
2087 		}
2088 	} else
2089 		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2090 done:
2091 	spin_unlock_irqrestore(&musb->lock, flags);
2092 	return ret;
2093 }
2094 
2095 /* disable an endpoint */
2096 static void
2097 musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
2098 {
2099 	u8			epnum = hep->desc.bEndpointAddress;
2100 	unsigned long		flags;
2101 	struct musb		*musb = hcd_to_musb(hcd);
2102 	u8			is_in = epnum & USB_DIR_IN;
2103 	struct musb_qh		*qh;
2104 	struct urb		*urb;
2105 	struct list_head	*sched;
2106 
2107 	spin_lock_irqsave(&musb->lock, flags);
2108 
2109 	qh = hep->hcpriv;
2110 	if (qh == NULL)
2111 		goto exit;
2112 
2113 	switch (qh->type) {
2114 	case USB_ENDPOINT_XFER_CONTROL:
2115 		sched = &musb->control;
2116 		break;
2117 	case USB_ENDPOINT_XFER_BULK:
2118 		if (qh->mux == 1) {
2119 			if (is_in)
2120 				sched = &musb->in_bulk;
2121 			else
2122 				sched = &musb->out_bulk;
2123 			break;
2124 		}
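		/* non-muxed bulk falls through to the
		 * singleton-queue handling below
		 */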
2125 	default:
2126 		/* REVISIT when we get a schedule tree, periodic transfers
2127 		 * won't always be at the head of a singleton queue...
2128 		 */
2129 		sched = NULL;
2130 		break;
2131 	}
2132 
2133 	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
2134 
2135 	/* kick first urb off the hardware, if needed */
2136 	qh->is_ready = 0;
2137 	if (!sched || qh == first_qh(sched)) {
2138 		urb = next_urb(qh);
2139 
2140 		/* make software (then hardware) stop ASAP */
2141 		if (!urb->unlinked)
2142 			urb->status = -ESHUTDOWN;
2143 
2144 		/* cleanup */
2145 		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
2146 
2147 		/* Then nuke all the others ... and advance the
2148 		 * queue on hw_ep (e.g. bulk ring) when we're done.
2149 		 */
2150 		while (!list_empty(&hep->urb_list)) {
2151 			urb = next_urb(qh);
2152 			urb->status = -ESHUTDOWN;
2153 			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
2154 		}
2155 	} else {
2156 		/* Just empty the queue; the hardware is busy with
2157 		 * other transfers, and since !qh->is_ready nothing
2158 		 * will activate any of these as it advances.
2159 		 */
2160 		while (!list_empty(&hep->urb_list))
2161 			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
2162 
2163 		hep->hcpriv = NULL;
2164 		list_del(&qh->ring);
2165 		kfree(qh);
2166 	}
2167 exit:
2168 	spin_unlock_irqrestore(&musb->lock, flags);
2169 }
2170 
2171 static int musb_h_get_frame_number(struct usb_hcd *hcd)
2172 {
2173 	struct musb	*musb = hcd_to_musb(hcd);
2174 
2175 	return musb_readw(musb->mregs, MUSB_FRAME);
2176 }
2177 
2178 static int musb_h_start(struct usb_hcd *hcd)
2179 {
2180 	struct musb	*musb = hcd_to_musb(hcd);
2181 
2182 	/* NOTE: musb_start() is called when the hub driver turns
2183 	 * on port power, or when (OTG) peripheral starts.
2184 	 */
2185 	hcd->state = HC_STATE_RUNNING;
2186 	musb->port1_status = 0;
2187 	return 0;
2188 }
2189 
2190 static void musb_h_stop(struct usb_hcd *hcd)
2191 {
2192 	musb_stop(hcd_to_musb(hcd));
2193 	hcd->state = HC_STATE_HALT;
2194 }
2195 
2196 static int musb_bus_suspend(struct usb_hcd *hcd)
2197 {
2198 	struct musb	*musb = hcd_to_musb(hcd);
2199 
2200 	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
2201 		return 0;
2202 
2203 	if (is_host_active(musb) && musb->is_active) {
2204 		WARNING("trying to suspend as %s is_active=%i\n",
2205 			otg_state_string(musb), musb->is_active);
2206 		return -EBUSY;
2207 	} else
2208 		return 0;
2209 }
2210 
2211 static int musb_bus_resume(struct usb_hcd *hcd)
2212 {
2213 	/* resuming child port does the work */
2214 	return 0;
2215 }
2216 
2217 const struct hc_driver musb_hc_driver = {
2218 	.description		= "musb-hcd",
2219 	.product_desc		= "MUSB HDRC host driver",
2220 	.hcd_priv_size		= sizeof(struct musb),
2221 	.flags			= HCD_USB2 | HCD_MEMORY,
2222 
2223 	/* not using irq handler or reset hooks from usbcore, since
2224 	 * those must be shared with peripheral code for OTG configs
2225 	 */
2226 
2227 	.start			= musb_h_start,
2228 	.stop			= musb_h_stop,
2229 
2230 	.get_frame_number	= musb_h_get_frame_number,
2231 
2232 	.urb_enqueue		= musb_urb_enqueue,
2233 	.urb_dequeue		= musb_urb_dequeue,
2234 	.endpoint_disable	= musb_h_disable,
2235 
2236 	.hub_status_data	= musb_hub_status_data,
2237 	.hub_control		= musb_hub_control,
2238 	.bus_suspend		= musb_bus_suspend,
2239 	.bus_resume		= musb_bus_resume,
2240 	/* .start_port_reset	= NULL, */
2241 	/* .hub_irq_enable	= NULL, */
2242 };
2243