1 /*
2  * USB Host Controller Driver for IMX21
3  *
4  * Copyright (C) 2006 Loping Dog Embedded Systems
5  * Copyright (C) 2009 Martin Fuzzey
6  * Originally written by Jay Monkman <jtm@lopingdog.com>
7  * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the
11  * Free Software Foundation; either version 2 of the License, or (at your
12  * option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
17  * for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software Foundation,
21  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22  */
23 
24 
25  /*
26   * The i.MX21 USB hardware contains
27   *    * 32 transfer descriptors (called ETDs)
28   *    * 4KB of Data memory
29   *
30   * The data memory is shared between the host and function controllers
31   * (but this driver only supports the host controller)
32   *
33   * So setting up a transfer involves:
34   *    * Allocating an ETD
35   *    * Filling in the ETD with the appropriate information
36   *    * Allocating data memory (and putting the offset in the ETD)
37   *    * Activating the ETD
38   *    * Getting an interrupt when done (see the outline after this comment).
39   *
40   * An ETD is assigned to each active endpoint.
41   *
42   * Low resource (ETD and Data memory) situations are handled differently for
43   * isochronous and non-isochronous transactions:
44   *
45   * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
46   *
47   * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
48   * They allocate both ETDs and Data memory during URB submission
49   * (and fail if unavailable).
50   */
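
/*
 * Rough outline (illustrative only) of how the steps above map onto this
 * driver for a non-isochronous URB:
 *
 *	etd_num = alloc_etd(imx21);             -- grab a free ETD
 *	setup_etd_dword0(...);                  -- address/endpoint/speed/maxpacket
 *	offset  = alloc_dmem(imx21, size, ep);  -- X+Y buffer space in DMEM
 *	activate_etd(imx21, etd_num, dir);      -- hand the ETD to the hardware
 *	... DONEINT ...                         -- nonisoc_etd_done() completes the URB
 *
 * If no ETD or no DMEM is free, the request waits on queue_for_etd or
 * queue_for_dmem until ep_idle()/free_dmem() releases resources.
 */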
51 
52 #include <linux/clk.h>
53 #include <linux/io.h>
54 #include <linux/kernel.h>
55 #include <linux/list.h>
56 #include <linux/platform_device.h>
57 #include <linux/slab.h>
58 #include <linux/usb.h>
59 #include <linux/usb/hcd.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/module.h>
62 
63 #include "imx21-hcd.h"
64 
65 #ifdef CONFIG_DYNAMIC_DEBUG
66 #define DEBUG
67 #endif
68 
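/*
 * When DEBUG is defined, DEBUG_LOG_FRAME() snapshots the current frame
 * number (USBH_FRMNUB) into etd-><event>_frame (activated, disactivated,
 * last_int, last_req); the DEBUG block in process_etds() prints these
 * when it has to unblock a stalled ETD.
 */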
69 #ifdef DEBUG
70 #define DEBUG_LOG_FRAME(imx21, etd, event) \
71 	(etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
72 #else
73 #define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
74 #endif
75 
76 static const char hcd_name[] = "imx21-hcd";
77 
78 static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
79 {
80 	return (struct imx21 *)hcd->hcd_priv;
81 }
82 
83 
84 /* =========================================== */
85 /* Hardware access helpers			*/
86 /* =========================================== */
87 
88 static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
89 {
90 	void __iomem *reg = imx21->regs + offset;
91 	writel(readl(reg) | mask, reg);
92 }
93 
94 static inline void clear_register_bits(struct imx21 *imx21,
95 	u32 offset, u32 mask)
96 {
97 	void __iomem *reg = imx21->regs + offset;
98 	writel(readl(reg) & ~mask, reg);
99 }
100 
101 static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
102 {
103 	void __iomem *reg = imx21->regs + offset;
104 
105 	if (readl(reg) & mask)
106 		writel(mask, reg);
107 }
108 
109 static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
110 {
111 	void __iomem *reg = imx21->regs + offset;
112 
113 	if (!(readl(reg) & mask))
114 		writel(mask, reg);
115 }
116 
117 static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
118 {
119 	writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
120 }
121 
122 static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
123 {
124 	return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
125 }
126 
127 static inline int wrap_frame(int counter)
128 {
129 	return counter & 0xFFFF;
130 }
131 
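/*
 * The frame counter is 16 bits wide, so comparisons must tolerate wrapping.
 * Example: frame_after(0x0002, 0xFFFE) is true because
 * (s16)0xFFFE - (s16)0x0002 = -2 - 2 = -4 < 0, i.e. frame 2 correctly
 * counts as coming after frame 0xFFFE across the wrap.
 */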
132 static inline int frame_after(int frame, int after)
133 {
134 	/* handle wrapping like jiffies time_after() */
135 	return (s16)((s16)after - (s16)frame) < 0;
136 }
137 
138 static int imx21_hc_get_frame(struct usb_hcd *hcd)
139 {
140 	struct imx21 *imx21 = hcd_to_imx21(hcd);
141 
142 	return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
143 }
144 
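/*
 * The ETD DMA engine can only fetch from 32-bit aligned addresses; buffers
 * failing this test are handled in activate_etd() either by PIO through
 * DMEM (small transfers) or via a temporary bounce buffer.
 */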
145 static inline bool unsuitable_for_dma(dma_addr_t addr)
146 {
147 	return (addr & 3) != 0;
148 }
149 
150 #include "imx21-dbg.c"
151 
152 static void nonisoc_urb_completed_for_etd(
153 	struct imx21 *imx21, struct etd_priv *etd, int status);
154 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
155 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd);
156 
157 /* =========================================== */
158 /* ETD management				*/
159 /* ===========================================	*/
160 
161 static int alloc_etd(struct imx21 *imx21)
162 {
163 	int i;
164 	struct etd_priv *etd = imx21->etd;
165 
166 	for (i = 0; i < USB_NUM_ETD; i++, etd++) {
167 		if (etd->alloc == 0) {
168 			memset(etd, 0, sizeof(imx21->etd[0]));
169 			etd->alloc = 1;
170 			debug_etd_allocated(imx21);
171 			return i;
172 		}
173 	}
174 	return -1;
175 }
176 
177 static void disactivate_etd(struct imx21 *imx21, int num)
178 {
179 	int etd_mask = (1 << num);
180 	struct etd_priv *etd = &imx21->etd[num];
181 
182 	writel(etd_mask, imx21->regs + USBH_ETDENCLR);
183 	clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
184 	writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
185 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
186 
187 	etd->active_count = 0;
188 
189 	DEBUG_LOG_FRAME(imx21, etd, disactivated);
190 }
191 
192 static void reset_etd(struct imx21 *imx21, int num)
193 {
194 	struct etd_priv *etd = imx21->etd + num;
195 	int i;
196 
197 	disactivate_etd(imx21, num);
198 
199 	for (i = 0; i < 4; i++)
200 		etd_writel(imx21, num, i, 0);
201 	etd->urb = NULL;
202 	etd->ep = NULL;
203 	etd->td = NULL;
204 	etd->bounce_buffer = NULL;
205 }
206 
207 static void free_etd(struct imx21 *imx21, int num)
208 {
209 	if (num < 0)
210 		return;
211 
212 	if (num >= USB_NUM_ETD) {
213 		dev_err(imx21->dev, "BAD etd=%d!\n", num);
214 		return;
215 	}
216 	if (imx21->etd[num].alloc == 0) {
217 		dev_err(imx21->dev, "ETD %d already free!\n", num);
218 		return;
219 	}
220 
221 	debug_etd_freed(imx21);
222 	reset_etd(imx21, num);
223 	memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
224 }
225 
226 
227 static void setup_etd_dword0(struct imx21 *imx21,
228 	int etd_num, struct urb *urb,  u8 dir, u16 maxpacket)
229 {
230 	etd_writel(imx21, etd_num, 0,
231 		((u32) usb_pipedevice(urb->pipe)) <<  DW0_ADDRESS |
232 		((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
233 		((u32) dir << DW0_DIRECT) |
234 		((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
235 			1 : 0) << DW0_SPEED) |
236 		((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
237 		((u32) maxpacket << DW0_MAXPKTSIZ));
238 }
239 
240 /**
241  * Copy buffer to data controller data memory.
242  * We cannot use memcpy_toio() because the hardware requires 32-bit writes.
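 * Bytes are packed little-endian into 32 bit words; e.g. count = 5 issues
 * two writel()s: a full word for bytes 0-3 and a partial word for byte 4.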
243  */
244 static void copy_to_dmem(
245 	struct imx21 *imx21, int dmem_offset, void *src, int count)
246 {
247 	void __iomem *dmem = imx21->regs + USBOTG_DMEM + dmem_offset;
248 	u32 word = 0;
249 	u8 *p = src;
250 	int byte = 0;
251 	int i;
252 
253 	for (i = 0; i < count; i++) {
254 		byte = i % 4;
255 		word += (*p++ << (byte * 8));
256 		if (byte == 3) {
257 			writel(word, dmem);
258 			dmem += 4;
259 			word = 0;
260 		}
261 	}
262 
263 	if (count && byte != 3)
264 		writel(word, dmem);
265 }
266 
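/*
 * Besides flipping the enable bits, activate_etd() decides how the buffer
 * reaches the hardware: an aligned dma_handle is handed to the ETD DMA
 * engine directly; a misaligned buffer that fits its DMEM allocation is
 * copied by PIO (copy_to_dmem()/memcpy_fromio()); anything larger goes
 * through a bounce buffer mapped with dma_map_single().
 */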
267 static void activate_etd(struct imx21 *imx21, int etd_num, u8 dir)
268 {
269 	u32 etd_mask = 1 << etd_num;
270 	struct etd_priv *etd = &imx21->etd[etd_num];
271 
272 	if (etd->dma_handle && unsuitable_for_dma(etd->dma_handle)) {
273 		/* For non-aligned isoc transfers the condition below is always true */
274 		if (etd->len <= etd->dmem_size) {
275 			/* Fits into data memory, use PIO */
276 			if (dir != TD_DIR_IN) {
277 				copy_to_dmem(imx21,
278 						etd->dmem_offset,
279 						etd->cpu_buffer, etd->len);
280 			}
281 			etd->dma_handle = 0;
282 
283 		} else {
284 			/* Too big for data memory, use bounce buffer */
285 			enum dma_data_direction dmadir;
286 
287 			if (dir == TD_DIR_IN) {
288 				dmadir = DMA_FROM_DEVICE;
289 				etd->bounce_buffer = kmalloc(etd->len,
290 								GFP_ATOMIC);
291 			} else {
292 				dmadir = DMA_TO_DEVICE;
293 				etd->bounce_buffer = kmemdup(etd->cpu_buffer,
294 								etd->len,
295 								GFP_ATOMIC);
296 			}
297 			if (!etd->bounce_buffer) {
298 				dev_err(imx21->dev, "failed bounce alloc\n");
299 				goto err_bounce_alloc;
300 			}
301 
302 			etd->dma_handle =
303 				dma_map_single(imx21->dev,
304 						etd->bounce_buffer,
305 						etd->len,
306 						dmadir);
307 			if (dma_mapping_error(imx21->dev, etd->dma_handle)) {
308 				dev_err(imx21->dev, "failed bounce map\n");
309 				goto err_bounce_map;
310 			}
311 		}
312 	}
313 
314 	clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
315 	set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
316 	clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
317 	clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
318 
319 	if (etd->dma_handle) {
320 		set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
321 		clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
322 		clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
323 		writel(etd->dma_handle, imx21->regs + USB_ETDSMSA(etd_num));
324 		set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
325 	} else {
326 		if (dir != TD_DIR_IN) {
327 			/* need to set for ZLP and PIO */
328 			set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
329 			set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
330 		}
331 	}
332 
333 	DEBUG_LOG_FRAME(imx21, etd, activated);
334 
335 #ifdef DEBUG
336 	if (!etd->active_count) {
337 		int i;
338 		etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
339 		etd->disactivated_frame = -1;
340 		etd->last_int_frame = -1;
341 		etd->last_req_frame = -1;
342 
343 		for (i = 0; i < 4; i++)
344 			etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
345 	}
346 #endif
347 
348 	etd->active_count = 1;
349 	writel(etd_mask, imx21->regs + USBH_ETDENSET);
350 	return;
351 
352 err_bounce_map:
353 	kfree(etd->bounce_buffer);
354 
355 err_bounce_alloc:
356 	free_dmem(imx21, etd);
357 	nonisoc_urb_completed_for_etd(imx21, etd, -ENOMEM);
358 }
359 
360 /* ===========================================	*/
361 /* Data memory management			*/
362 /* ===========================================	*/
363 
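/*
 * DMEM allocation is a first-fit scan of the offset-ordered dmem_list.
 * Sizes are rounded up to a 4 byte multiple by "size += (~size + 1) & 0x3",
 * e.g. a 5 byte request occupies 8 bytes of DMEM.
 */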
364 static int alloc_dmem(struct imx21 *imx21, unsigned int size,
365 		      struct usb_host_endpoint *ep)
366 {
367 	unsigned int offset = 0;
368 	struct imx21_dmem_area *area;
369 	struct imx21_dmem_area *tmp;
370 
371 	size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
372 
373 	if (size > DMEM_SIZE) {
374 		dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
375 			size, DMEM_SIZE);
376 		return -EINVAL;
377 	}
378 
379 	list_for_each_entry(tmp, &imx21->dmem_list, list) {
380 		if ((size + offset) < offset)
381 			goto fail;
382 		if ((size + offset) <= tmp->offset)
383 			break;
384 		offset = tmp->size + tmp->offset;
385 		if ((offset + size) > DMEM_SIZE)
386 			goto fail;
387 	}
388 
389 	area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
390 	if (area == NULL)
391 		return -ENOMEM;
392 
393 	area->ep = ep;
394 	area->offset = offset;
395 	area->size = size;
396 	list_add_tail(&area->list, &tmp->list);
397 	debug_dmem_allocated(imx21, size);
398 	return offset;
399 
400 fail:
401 	return -ENOMEM;
402 }
403 
404 /* Memory now available for a queued ETD - activate it */
405 static void activate_queued_etd(struct imx21 *imx21,
406 	struct etd_priv *etd, u32 dmem_offset)
407 {
408 	struct urb_priv *urb_priv = etd->urb->hcpriv;
409 	int etd_num = etd - &imx21->etd[0];
410 	u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
411 	u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
412 
413 	dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
414 		etd_num);
415 	etd_writel(imx21, etd_num, 1,
416 	    ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
417 
418 	etd->dmem_offset = dmem_offset;
419 	urb_priv->active = 1;
420 	activate_etd(imx21, etd_num, dir);
421 }
422 
423 static void free_dmem(struct imx21 *imx21, struct etd_priv *etd)
424 {
425 	struct imx21_dmem_area *area;
426 	struct etd_priv *tmp;
427 	int found = 0;
428 	int offset;
429 
430 	if (!etd->dmem_size)
431 		return;
432 	etd->dmem_size = 0;
433 
434 	offset = etd->dmem_offset;
435 	list_for_each_entry(area, &imx21->dmem_list, list) {
436 		if (area->offset == offset) {
437 			debug_dmem_freed(imx21, area->size);
438 			list_del(&area->list);
439 			kfree(area);
440 			found = 1;
441 			break;
442 		}
443 	}
444 
445 	if (!found)  {
446 		dev_err(imx21->dev,
447 			"Trying to free unallocated DMEM %d\n", offset);
448 		return;
449 	}
450 
451 	/* Try again to allocate memory for anything we've queued */
452 	list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
453 		offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
454 		if (offset >= 0) {
455 			list_del(&etd->queue);
456 			activate_queued_etd(imx21, etd, (u32)offset);
457 		}
458 	}
459 }
460 
461 static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
462 {
463 	struct imx21_dmem_area *area, *tmp;
464 
465 	list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
466 		if (area->ep == ep) {
467 			dev_err(imx21->dev,
468 				"Active DMEM %d for disabled ep=%p\n",
469 				area->offset, ep);
470 			list_del(&area->list);
471 			kfree(area);
472 		}
473 	}
474 }
475 
476 
477 /* ===========================================	*/
478 /* Endpoint handling				*/
479 /* ===========================================	*/
480 
481 /* Endpoint now idle - release its ETD(s) or assign to queued request */
482 static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
483 {
484 	int i;
485 
486 	for (i = 0; i < NUM_ISO_ETDS; i++) {
487 		int etd_num = ep_priv->etd[i];
488 		struct etd_priv *etd;
489 		if (etd_num < 0)
490 			continue;
491 
492 		etd = &imx21->etd[etd_num];
493 		ep_priv->etd[i] = -1;
494 
495 		free_dmem(imx21, etd); /* for isoc */
496 
497 		if (list_empty(&imx21->queue_for_etd)) {
498 			free_etd(imx21, etd_num);
499 			continue;
500 		}
501 
502 		dev_dbg(imx21->dev,
503 			"assigning idle etd %d for queued request\n", etd_num);
504 		ep_priv = list_first_entry(&imx21->queue_for_etd,
505 			struct ep_priv, queue);
506 		list_del(&ep_priv->queue);
507 		reset_etd(imx21, etd_num);
508 		ep_priv->waiting_etd = 0;
509 		ep_priv->etd[i] = etd_num;
510 
511 		if (list_empty(&ep_priv->ep->urb_list)) {
512 			dev_err(imx21->dev, "No urb for queued ep!\n");
513 			continue;
514 		}
515 		schedule_nonisoc_etd(imx21, list_first_entry(
516 			&ep_priv->ep->urb_list, struct urb, urb_list));
517 	}
518 }
519 
520 static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
521 __releases(imx21->lock)
522 __acquires(imx21->lock)
523 {
524 	struct imx21 *imx21 = hcd_to_imx21(hcd);
525 	struct ep_priv *ep_priv = urb->ep->hcpriv;
526 	struct urb_priv *urb_priv = urb->hcpriv;
527 
528 	debug_urb_completed(imx21, urb, status);
529 	dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
530 
531 	kfree(urb_priv->isoc_td);
532 	kfree(urb->hcpriv);
533 	urb->hcpriv = NULL;
534 	usb_hcd_unlink_urb_from_ep(hcd, urb);
535 	spin_unlock(&imx21->lock);
536 	usb_hcd_giveback_urb(hcd, urb, status);
537 	spin_lock(&imx21->lock);
538 	if (list_empty(&ep_priv->ep->urb_list))
539 		ep_idle(imx21, ep_priv);
540 }
541 
542 static void nonisoc_urb_completed_for_etd(
543 	struct imx21 *imx21, struct etd_priv *etd, int status)
544 {
545 	struct usb_host_endpoint *ep = etd->ep;
546 
547 	urb_done(imx21->hcd, etd->urb, status);
548 	etd->urb = NULL;
549 
550 	if (!list_empty(&ep->urb_list)) {
551 		struct urb *urb = list_first_entry(
552 					&ep->urb_list, struct urb, urb_list);
553 
554 		dev_vdbg(imx21->dev, "next URB %p\n", urb);
555 		schedule_nonisoc_etd(imx21, urb);
556 	}
557 }
558 
559 
560 /* ===========================================	*/
561 /* ISOC Handling ... 				*/
562 /* ===========================================	*/
563 
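/*
 * Each isochronous endpoint owns NUM_ISO_ETDS ETDs, each with a DMEM area
 * sized for the largest packet, giving double buffering.  URB packets are
 * turned into struct td entries on ep_priv->td_list; schedule_isoc_etds()
 * loads the next td into any idle ETD and drops packets whose scheduled
 * frame has already passed (reported as -EXDEV).
 */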
564 static void schedule_isoc_etds(struct usb_hcd *hcd,
565 	struct usb_host_endpoint *ep)
566 {
567 	struct imx21 *imx21 = hcd_to_imx21(hcd);
568 	struct ep_priv *ep_priv = ep->hcpriv;
569 	struct etd_priv *etd;
570 	struct urb_priv *urb_priv;
571 	struct td *td;
572 	int etd_num;
573 	int i;
574 	int cur_frame;
575 	u8 dir;
576 
577 	for (i = 0; i < NUM_ISO_ETDS; i++) {
578 too_late:
579 		if (list_empty(&ep_priv->td_list))
580 			break;
581 
582 		etd_num = ep_priv->etd[i];
583 		if (etd_num < 0)
584 			break;
585 
586 		etd = &imx21->etd[etd_num];
587 		if (etd->urb)
588 			continue;
589 
590 		td = list_entry(ep_priv->td_list.next, struct td, list);
591 		list_del(&td->list);
592 		urb_priv = td->urb->hcpriv;
593 
594 		cur_frame = imx21_hc_get_frame(hcd);
595 		if (frame_after(cur_frame, td->frame)) {
596 			dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
597 				cur_frame, td->frame);
598 			urb_priv->isoc_status = -EXDEV;
599 			td->urb->iso_frame_desc[
600 				td->isoc_index].actual_length = 0;
601 			td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
602 			if (--urb_priv->isoc_remaining == 0)
603 				urb_done(hcd, td->urb, urb_priv->isoc_status);
604 			goto too_late;
605 		}
606 
607 		urb_priv->active = 1;
608 		etd->td = td;
609 		etd->ep = td->ep;
610 		etd->urb = td->urb;
611 		etd->len = td->len;
612 		etd->dma_handle = td->dma_handle;
613 		etd->cpu_buffer = td->cpu_buffer;
614 
615 		debug_isoc_submitted(imx21, cur_frame, td);
616 
617 		dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
618 		setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
619 		etd_writel(imx21, etd_num, 1, etd->dmem_offset);
620 		etd_writel(imx21, etd_num, 2,
621 			(TD_NOTACCESSED << DW2_COMPCODE) |
622 			((td->frame & 0xFFFF) << DW2_STARTFRM));
623 		etd_writel(imx21, etd_num, 3,
624 			(TD_NOTACCESSED << DW3_COMPCODE0) |
625 			(td->len << DW3_PKTLEN0));
626 
627 		activate_etd(imx21, etd_num, dir);
628 	}
629 }
630 
631 static void isoc_etd_done(struct usb_hcd *hcd, int etd_num)
632 {
633 	struct imx21 *imx21 = hcd_to_imx21(hcd);
634 	int etd_mask = 1 << etd_num;
635 	struct etd_priv *etd = imx21->etd + etd_num;
636 	struct urb *urb = etd->urb;
637 	struct urb_priv *urb_priv = urb->hcpriv;
638 	struct td *td = etd->td;
639 	struct usb_host_endpoint *ep = etd->ep;
640 	int isoc_index = td->isoc_index;
641 	unsigned int pipe = urb->pipe;
642 	int dir_in = usb_pipein(pipe);
643 	int cc;
644 	int bytes_xfrd;
645 
646 	disactivate_etd(imx21, etd_num);
647 
648 	cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
649 	bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
650 
651 	/* Input doesn't always fill the buffer, so don't generate an error
652 	 * when this happens.
653 	 */
654 	if (dir_in && (cc == TD_DATAUNDERRUN))
655 		cc = TD_CC_NOERROR;
656 
657 	if (cc == TD_NOTACCESSED)
658 		bytes_xfrd = 0;
659 
660 	debug_isoc_completed(imx21,
661 		imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
662 	if (cc) {
663 		urb_priv->isoc_status = -EXDEV;
664 		dev_dbg(imx21->dev,
665 			"bad iso cc=0x%X frame=%d sched frame=%d "
666 			"cnt=%d len=%d urb=%p etd=%d index=%d\n",
667 			cc,  imx21_hc_get_frame(hcd), td->frame,
668 			bytes_xfrd, td->len, urb, etd_num, isoc_index);
669 	}
670 
671 	if (dir_in) {
672 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
673 		if (!etd->dma_handle)
674 			memcpy_fromio(etd->cpu_buffer,
675 				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
676 				bytes_xfrd);
677 	}
678 
679 	urb->actual_length += bytes_xfrd;
680 	urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
681 	urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
682 
683 	etd->td = NULL;
684 	etd->urb = NULL;
685 	etd->ep = NULL;
686 
687 	if (--urb_priv->isoc_remaining == 0)
688 		urb_done(hcd, urb, urb_priv->isoc_status);
689 
690 	schedule_isoc_etds(hcd, ep);
691 }
692 
693 static struct ep_priv *alloc_isoc_ep(
694 	struct imx21 *imx21, struct usb_host_endpoint *ep)
695 {
696 	struct ep_priv *ep_priv;
697 	int i;
698 
699 	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
700 	if (!ep_priv)
701 		return NULL;
702 
703 	for (i = 0; i < NUM_ISO_ETDS; i++)
704 		ep_priv->etd[i] = -1;
705 
706 	INIT_LIST_HEAD(&ep_priv->td_list);
707 	ep_priv->ep = ep;
708 	ep->hcpriv = ep_priv;
709 	return ep_priv;
710 }
711 
712 static int alloc_isoc_etds(struct imx21 *imx21, struct ep_priv *ep_priv)
713 {
714 	int i, j;
715 	int etd_num;
716 
717 	/* Allocate the ETDs if required */
718 	for (i = 0; i < NUM_ISO_ETDS; i++) {
719 		if (ep_priv->etd[i] < 0) {
720 			etd_num = alloc_etd(imx21);
721 			if (etd_num < 0)
722 				goto alloc_etd_failed;
723 
724 			ep_priv->etd[i] = etd_num;
725 			imx21->etd[etd_num].ep = ep_priv->ep;
726 		}
727 	}
728 	return 0;
729 
730 alloc_etd_failed:
731 	dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
732 	for (j = 0; j < i; j++) {
733 		free_etd(imx21, ep_priv->etd[j]);
734 		ep_priv->etd[j] = -1;
735 	}
736 	return -ENOMEM;
737 }
738 
739 static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
740 				     struct usb_host_endpoint *ep,
741 				     struct urb *urb, gfp_t mem_flags)
742 {
743 	struct imx21 *imx21 = hcd_to_imx21(hcd);
744 	struct urb_priv *urb_priv;
745 	unsigned long flags;
746 	struct ep_priv *ep_priv;
747 	struct td *td = NULL;
748 	int i;
749 	int ret;
750 	int cur_frame;
751 	u16 maxpacket;
752 
753 	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
754 	if (urb_priv == NULL)
755 		return -ENOMEM;
756 
757 	urb_priv->isoc_td = kzalloc(
758 		sizeof(struct td) * urb->number_of_packets, mem_flags);
759 	if (urb_priv->isoc_td == NULL) {
760 		ret = -ENOMEM;
761 		goto alloc_td_failed;
762 	}
763 
764 	spin_lock_irqsave(&imx21->lock, flags);
765 
766 	if (ep->hcpriv == NULL) {
767 		ep_priv = alloc_isoc_ep(imx21, ep);
768 		if (ep_priv == NULL) {
769 			ret = -ENOMEM;
770 			goto alloc_ep_failed;
771 		}
772 	} else {
773 		ep_priv = ep->hcpriv;
774 	}
775 
776 	ret = alloc_isoc_etds(imx21, ep_priv);
777 	if (ret)
778 		goto alloc_etd_failed;
779 
780 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
781 	if (ret)
782 		goto link_failed;
783 
784 	urb->status = -EINPROGRESS;
785 	urb->actual_length = 0;
786 	urb->error_count = 0;
787 	urb->hcpriv = urb_priv;
788 	urb_priv->ep = ep;
789 
790 	/* allocate data memory for largest packets if not already done */
791 	maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
792 	for (i = 0; i < NUM_ISO_ETDS; i++) {
793 		struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
794 
795 		if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
796 			/* not sure if this can really occur.... */
797 			dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
798 				etd->dmem_size, maxpacket);
799 			ret = -EMSGSIZE;
800 			goto alloc_dmem_failed;
801 		}
802 
803 		if (etd->dmem_size == 0) {
804 			etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
805 			if (etd->dmem_offset < 0) {
806 				dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
807 				ret = -EAGAIN;
808 				goto alloc_dmem_failed;
809 			}
810 			etd->dmem_size = maxpacket;
811 		}
812 	}
813 
814 	/* calculate frame */
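	/*
	 * Worked example: last queued td at frame 100, interval 8 and
	 * cur_frame 130 gives start_frame = 108, which is already in the
	 * past, so i = DIV_ROUND_UP(22, 8) = 3.  With URB_ISO_ASAP the start
	 * is pushed to 108 + 3 * 8 = 132 and no packets are lost; otherwise
	 * the first 3 packets are skipped (isoc_remaining is reduced below).
	 */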
815 	cur_frame = imx21_hc_get_frame(hcd);
816 	i = 0;
817 	if (list_empty(&ep_priv->td_list)) {
818 		urb->start_frame = wrap_frame(cur_frame + 5);
819 	} else {
820 		urb->start_frame = wrap_frame(list_entry(ep_priv->td_list.prev,
821 				struct td, list)->frame + urb->interval);
822 
823 		if (frame_after(cur_frame, urb->start_frame)) {
824 			dev_dbg(imx21->dev,
825 				"enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
826 				urb->start_frame, cur_frame,
827 				(urb->transfer_flags & URB_ISO_ASAP) != 0);
828 			i = DIV_ROUND_UP(wrap_frame(
829 					cur_frame - urb->start_frame),
830 					urb->interval);
831 
832 			/* Treat underruns as if URB_ISO_ASAP was set */
833 			if ((urb->transfer_flags & URB_ISO_ASAP) ||
834 					i >= urb->number_of_packets) {
835 				urb->start_frame = wrap_frame(urb->start_frame
836 						+ i * urb->interval);
837 				i = 0;
838 			}
839 		}
840 	}
841 
842 	/* set up transfers */
843 	urb_priv->isoc_remaining = urb->number_of_packets - i;
844 	td = urb_priv->isoc_td;
845 	for (; i < urb->number_of_packets; i++, td++) {
846 		unsigned int offset = urb->iso_frame_desc[i].offset;
847 		td->ep = ep;
848 		td->urb = urb;
849 		td->len = urb->iso_frame_desc[i].length;
850 		td->isoc_index = i;
851 		td->frame = wrap_frame(urb->start_frame + urb->interval * i);
852 		td->dma_handle = urb->transfer_dma + offset;
853 		td->cpu_buffer = urb->transfer_buffer + offset;
854 		list_add_tail(&td->list, &ep_priv->td_list);
855 	}
856 
857 	dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
858 		urb->number_of_packets, urb->start_frame, td->frame);
859 
860 	debug_urb_submitted(imx21, urb);
861 	schedule_isoc_etds(hcd, ep);
862 
863 	spin_unlock_irqrestore(&imx21->lock, flags);
864 	return 0;
865 
866 alloc_dmem_failed:
867 	usb_hcd_unlink_urb_from_ep(hcd, urb);
868 
869 link_failed:
870 alloc_etd_failed:
871 alloc_ep_failed:
872 	spin_unlock_irqrestore(&imx21->lock, flags);
873 	kfree(urb_priv->isoc_td);
874 
875 alloc_td_failed:
876 	kfree(urb_priv);
877 	return ret;
878 }
879 
880 static void dequeue_isoc_urb(struct imx21 *imx21,
881 	struct urb *urb, struct ep_priv *ep_priv)
882 {
883 	struct urb_priv *urb_priv = urb->hcpriv;
884 	struct td *td, *tmp;
885 	int i;
886 
887 	if (urb_priv->active) {
888 		for (i = 0; i < NUM_ISO_ETDS; i++) {
889 			int etd_num = ep_priv->etd[i];
890 			if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
891 				struct etd_priv *etd = imx21->etd + etd_num;
892 
893 				reset_etd(imx21, etd_num);
894 				free_dmem(imx21, etd);
895 			}
896 		}
897 	}
898 
899 	list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
900 		if (td->urb == urb) {
901 			dev_vdbg(imx21->dev, "removing td %p\n", td);
902 			list_del(&td->list);
903 		}
904 	}
905 }
906 
907 /* =========================================== */
908 /* NON ISOC Handling ... 			*/
909 /* =========================================== */
910 
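/*
 * Non-ISOC transfers use a single ETD per endpoint (ep_priv->etd[0]) and a
 * small state machine in urb_priv->state: control URBs step through
 * US_CTRL_SETUP -> US_CTRL_DATA (if there is a data stage) -> US_CTRL_ACK,
 * while bulk URBs are US_BULK, with US_BULK0 used for the trailing zero
 * length packet when URB_ZERO_PACKET applies.  Each ETD completion
 * re-enters schedule_nonisoc_etd() until the URB is finished.
 */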
911 static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
912 {
913 	unsigned int pipe = urb->pipe;
914 	struct urb_priv *urb_priv = urb->hcpriv;
915 	struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
916 	int state = urb_priv->state;
917 	int etd_num = ep_priv->etd[0];
918 	struct etd_priv *etd;
919 	u32 count;
920 	u16 etd_buf_size;
921 	u16 maxpacket;
922 	u8 dir;
923 	u8 bufround;
924 	u8 datatoggle;
925 	u8 interval = 0;
926 	u8 relpolpos = 0;
927 
928 	if (etd_num < 0) {
929 		dev_err(imx21->dev, "No valid ETD\n");
930 		return;
931 	}
932 	if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
933 		dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
934 
935 	etd = &imx21->etd[etd_num];
936 	maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
937 	if (!maxpacket)
938 		maxpacket = 8;
939 
940 	if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
941 		if (state == US_CTRL_SETUP) {
942 			dir = TD_DIR_SETUP;
943 			if (unsuitable_for_dma(urb->setup_dma))
944 				usb_hcd_unmap_urb_setup_for_dma(imx21->hcd,
945 					urb);
946 			etd->dma_handle = urb->setup_dma;
947 			etd->cpu_buffer = urb->setup_packet;
948 			bufround = 0;
949 			count = 8;
950 			datatoggle = TD_TOGGLE_DATA0;
951 		} else {	/* US_CTRL_ACK */
952 			dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
953 			bufround = 0;
954 			count = 0;
955 			datatoggle = TD_TOGGLE_DATA1;
956 		}
957 	} else {
958 		dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
959 		bufround = (dir == TD_DIR_IN) ? 1 : 0;
960 		if (unsuitable_for_dma(urb->transfer_dma))
961 			usb_hcd_unmap_urb_for_dma(imx21->hcd, urb);
962 
963 		etd->dma_handle = urb->transfer_dma;
964 		etd->cpu_buffer = urb->transfer_buffer;
965 		if (usb_pipebulk(pipe) && (state == US_BULK0))
966 			count = 0;
967 		else
968 			count = urb->transfer_buffer_length;
969 
970 		if (usb_pipecontrol(pipe)) {
971 			datatoggle = TD_TOGGLE_DATA1;
972 		} else {
973 			if (usb_gettoggle(
974 					urb->dev,
975 					usb_pipeendpoint(urb->pipe),
976 					usb_pipeout(urb->pipe)))
977 				datatoggle = TD_TOGGLE_DATA1;
978 			else
979 				datatoggle = TD_TOGGLE_DATA0;
980 		}
981 	}
982 
983 	etd->urb = urb;
984 	etd->ep = urb_priv->ep;
985 	etd->len = count;
986 
987 	if (usb_pipeint(pipe)) {
988 		interval = urb->interval;
989 		relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
990 	}
991 
992 	/* Write ETD to device memory */
993 	setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
994 
995 	etd_writel(imx21, etd_num, 2,
996 		(u32) interval << DW2_POLINTERV |
997 		((u32) relpolpos << DW2_RELPOLPOS) |
998 		((u32) dir << DW2_DIRPID) |
999 		((u32) bufround << DW2_BUFROUND) |
1000 		((u32) datatoggle << DW2_DATATOG) |
1001 		((u32) TD_NOTACCESSED << DW2_COMPCODE));
1002 
1003 	/* DMA will always transfer buffer size even if TOBYCNT in DWORD3
1004 	   is smaller. Make sure we don't overrun the buffer!
1005 	 */
1006 	if (count && count < maxpacket)
1007 		etd_buf_size = count;
1008 	else
1009 		etd_buf_size = maxpacket;
1010 
1011 	etd_writel(imx21, etd_num, 3,
1012 		((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
1013 
1014 	if (!count)
1015 		etd->dma_handle = 0;
1016 
1017 	/* allocate x and y buffer space at once */
1018 	etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
1019 	etd->dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
1020 	if (etd->dmem_offset < 0) {
1021 		/* Setup everything we can in HW and update when we get DMEM */
1022 		etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
1023 
1024 		dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
1025 		debug_urb_queued_for_dmem(imx21, urb);
1026 		list_add_tail(&etd->queue, &imx21->queue_for_dmem);
1027 		return;
1028 	}
1029 
1030 	etd_writel(imx21, etd_num, 1,
1031 		(((u32) etd->dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
1032 		(u32) etd->dmem_offset);
1033 
1034 	urb_priv->active = 1;
1035 
1036 	/* enable the ETD to kick off transfer */
1037 	dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
1038 		etd_num, count, dir != TD_DIR_IN ? "out" : "in");
1039 	activate_etd(imx21, etd_num, dir);
1040 
1041 }
1042 
1043 static void nonisoc_etd_done(struct usb_hcd *hcd, int etd_num)
1044 {
1045 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1046 	struct etd_priv *etd = &imx21->etd[etd_num];
1047 	struct urb *urb = etd->urb;
1048 	u32 etd_mask = 1 << etd_num;
1049 	struct urb_priv *urb_priv = urb->hcpriv;
1050 	int dir;
1051 	int cc;
1052 	u32 bytes_xfrd;
1053 	int etd_done;
1054 
1055 	disactivate_etd(imx21, etd_num);
1056 
1057 	dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
1058 	cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
1059 	bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
1060 
1061 	/* save toggle carry */
1062 	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1063 		      usb_pipeout(urb->pipe),
1064 		      (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
1065 
1066 	if (dir == TD_DIR_IN) {
1067 		clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
1068 		clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
1069 
1070 		if (etd->bounce_buffer) {
1071 			memcpy(etd->cpu_buffer, etd->bounce_buffer, bytes_xfrd);
1072 			dma_unmap_single(imx21->dev,
1073 				etd->dma_handle, etd->len, DMA_FROM_DEVICE);
1074 		} else if (!etd->dma_handle && bytes_xfrd) {/* PIO */
1075 			memcpy_fromio(etd->cpu_buffer,
1076 				imx21->regs + USBOTG_DMEM + etd->dmem_offset,
1077 				bytes_xfrd);
1078 		}
1079 	}
1080 
1081 	kfree(etd->bounce_buffer);
1082 	etd->bounce_buffer = NULL;
1083 	free_dmem(imx21, etd);
1084 
1085 	urb->error_count = 0;
1086 	if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
1087 			&& (cc == TD_DATAUNDERRUN))
1088 		cc = TD_CC_NOERROR;
1089 
1090 	if (cc != 0)
1091 		dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
1092 
1093 	etd_done = (cc_to_error[cc] != 0);	/* stop if error */
1094 
1095 	switch (usb_pipetype(urb->pipe)) {
1096 	case PIPE_CONTROL:
1097 		switch (urb_priv->state) {
1098 		case US_CTRL_SETUP:
1099 			if (urb->transfer_buffer_length > 0)
1100 				urb_priv->state = US_CTRL_DATA;
1101 			else
1102 				urb_priv->state = US_CTRL_ACK;
1103 			break;
1104 		case US_CTRL_DATA:
1105 			urb->actual_length += bytes_xfrd;
1106 			urb_priv->state = US_CTRL_ACK;
1107 			break;
1108 		case US_CTRL_ACK:
1109 			etd_done = 1;
1110 			break;
1111 		default:
1112 			dev_err(imx21->dev,
1113 				"Invalid pipe state %d\n", urb_priv->state);
1114 			etd_done = 1;
1115 			break;
1116 		}
1117 		break;
1118 
1119 	case PIPE_BULK:
1120 		urb->actual_length += bytes_xfrd;
1121 		if ((urb_priv->state == US_BULK)
1122 		    && (urb->transfer_flags & URB_ZERO_PACKET)
1123 		    && urb->transfer_buffer_length > 0
1124 		    && ((urb->transfer_buffer_length %
1125 			 usb_maxpacket(urb->dev, urb->pipe,
1126 				       usb_pipeout(urb->pipe))) == 0)) {
1127 			/* need a 0-packet */
1128 			urb_priv->state = US_BULK0;
1129 		} else {
1130 			etd_done = 1;
1131 		}
1132 		break;
1133 
1134 	case PIPE_INTERRUPT:
1135 		urb->actual_length += bytes_xfrd;
1136 		etd_done = 1;
1137 		break;
1138 	}
1139 
1140 	if (etd_done)
1141 		nonisoc_urb_completed_for_etd(imx21, etd, cc_to_error[cc]);
1142 	else {
1143 		dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
1144 		schedule_nonisoc_etd(imx21, urb);
1145 	}
1146 }
1147 
1148 
1149 static struct ep_priv *alloc_ep(void)
1150 {
1151 	int i;
1152 	struct ep_priv *ep_priv;
1153 
1154 	ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
1155 	if (!ep_priv)
1156 		return NULL;
1157 
1158 	for (i = 0; i < NUM_ISO_ETDS; ++i)
1159 		ep_priv->etd[i] = -1;
1160 
1161 	return ep_priv;
1162 }
1163 
1164 static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
1165 				struct urb *urb, gfp_t mem_flags)
1166 {
1167 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1168 	struct usb_host_endpoint *ep = urb->ep;
1169 	struct urb_priv *urb_priv;
1170 	struct ep_priv *ep_priv;
1171 	struct etd_priv *etd;
1172 	int ret;
1173 	unsigned long flags;
1174 
1175 	dev_vdbg(imx21->dev,
1176 		"enqueue urb=%p ep=%p len=%d "
1177 		"buffer=%p dma=%pad setupBuf=%p setupDma=%pad\n",
1178 		urb, ep,
1179 		urb->transfer_buffer_length,
1180 		urb->transfer_buffer, &urb->transfer_dma,
1181 		urb->setup_packet, &urb->setup_dma);
1182 
1183 	if (usb_pipeisoc(urb->pipe))
1184 		return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
1185 
1186 	urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
1187 	if (!urb_priv)
1188 		return -ENOMEM;
1189 
1190 	spin_lock_irqsave(&imx21->lock, flags);
1191 
1192 	ep_priv = ep->hcpriv;
1193 	if (ep_priv == NULL) {
1194 		ep_priv = alloc_ep();
1195 		if (!ep_priv) {
1196 			ret = -ENOMEM;
1197 			goto failed_alloc_ep;
1198 		}
1199 		ep->hcpriv = ep_priv;
1200 		ep_priv->ep = ep;
1201 	}
1202 
1203 	ret = usb_hcd_link_urb_to_ep(hcd, urb);
1204 	if (ret)
1205 		goto failed_link;
1206 
1207 	urb->status = -EINPROGRESS;
1208 	urb->actual_length = 0;
1209 	urb->error_count = 0;
1210 	urb->hcpriv = urb_priv;
1211 	urb_priv->ep = ep;
1212 
1213 	switch (usb_pipetype(urb->pipe)) {
1214 	case PIPE_CONTROL:
1215 		urb_priv->state = US_CTRL_SETUP;
1216 		break;
1217 	case PIPE_BULK:
1218 		urb_priv->state = US_BULK;
1219 		break;
1220 	}
1221 
1222 	debug_urb_submitted(imx21, urb);
1223 	if (ep_priv->etd[0] < 0) {
1224 		if (ep_priv->waiting_etd) {
1225 			dev_dbg(imx21->dev,
1226 				"no ETD available already queued %p\n",
1227 				ep_priv);
1228 			debug_urb_queued_for_etd(imx21, urb);
1229 			goto out;
1230 		}
1231 		ep_priv->etd[0] = alloc_etd(imx21);
1232 		if (ep_priv->etd[0] < 0) {
1233 			dev_dbg(imx21->dev,
1234 				"no ETD available queueing %p\n", ep_priv);
1235 			debug_urb_queued_for_etd(imx21, urb);
1236 			list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
1237 			ep_priv->waiting_etd = 1;
1238 			goto out;
1239 		}
1240 	}
1241 
1242 	/* Schedule if no URB already active for this endpoint */
1243 	etd = &imx21->etd[ep_priv->etd[0]];
1244 	if (etd->urb == NULL) {
1245 		DEBUG_LOG_FRAME(imx21, etd, last_req);
1246 		schedule_nonisoc_etd(imx21, urb);
1247 	}
1248 
1249 out:
1250 	spin_unlock_irqrestore(&imx21->lock, flags);
1251 	return 0;
1252 
1253 failed_link:
1254 failed_alloc_ep:
1255 	spin_unlock_irqrestore(&imx21->lock, flags);
1256 	kfree(urb_priv);
1257 	return ret;
1258 }
1259 
1260 static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
1261 				int status)
1262 {
1263 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1264 	unsigned long flags;
1265 	struct usb_host_endpoint *ep;
1266 	struct ep_priv *ep_priv;
1267 	struct urb_priv *urb_priv = urb->hcpriv;
1268 	int ret = -EINVAL;
1269 
1270 	dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
1271 		urb, usb_pipeisoc(urb->pipe), status);
1272 
1273 	spin_lock_irqsave(&imx21->lock, flags);
1274 
1275 	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1276 	if (ret)
1277 		goto fail;
1278 	ep = urb_priv->ep;
1279 	ep_priv = ep->hcpriv;
1280 
1281 	debug_urb_unlinked(imx21, urb);
1282 
1283 	if (usb_pipeisoc(urb->pipe)) {
1284 		dequeue_isoc_urb(imx21, urb, ep_priv);
1285 		schedule_isoc_etds(hcd, ep);
1286 	} else if (urb_priv->active) {
1287 		int etd_num = ep_priv->etd[0];
1288 		if (etd_num != -1) {
1289 			struct etd_priv *etd = &imx21->etd[etd_num];
1290 
1291 			disactivate_etd(imx21, etd_num);
1292 			free_dmem(imx21, etd);
1293 			etd->urb = NULL;
1294 			kfree(etd->bounce_buffer);
1295 			etd->bounce_buffer = NULL;
1296 		}
1297 	}
1298 
1299 	urb_done(hcd, urb, status);
1300 
1301 	spin_unlock_irqrestore(&imx21->lock, flags);
1302 	return 0;
1303 
1304 fail:
1305 	spin_unlock_irqrestore(&imx21->lock, flags);
1306 	return ret;
1307 }
1308 
1309 /* =========================================== */
1310 /* Interrupt dispatch	 			*/
1311 /* =========================================== */
1312 
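/*
 * process_etds() runs from the interrupt handler on DONEINT and, while the
 * unblock kludge below is armed, on every SOF.  It walks all USB_NUM_ETD
 * ETDs, completing those flagged in USBH_ETDDONESTAT and force-completing
 * any that finished without the done flag after a ~10 frame wait.
 */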
1313 static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
1314 {
1315 	int etd_num;
1316 	int enable_sof_int = 0;
1317 	unsigned long flags;
1318 
1319 	spin_lock_irqsave(&imx21->lock, flags);
1320 
1321 	for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
1322 		u32 etd_mask = 1 << etd_num;
1323 		u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
1324 		u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
1325 		struct etd_priv *etd = &imx21->etd[etd_num];
1326 
1327 
1328 		if (done) {
1329 			DEBUG_LOG_FRAME(imx21, etd, last_int);
1330 		} else {
1331 /*
1332  * Kludge warning!
1333  *
1334  * When multiple transfers are using the bus we sometimes get into a state
1335  * where the transfer has completed (the CC field of the ETD is != 0x0F),
1336  * the ETD has self disabled but the ETDDONESTAT flag is not set
1337  * (and hence no interrupt occurs).
1338  * This causes the transfer in question to hang.
1339  * The kludge below checks for this condition at each SOF and processes any
1340  * blocked ETDs (after an arbitrary 10 frame wait)
1341  *
1342  * With a single active transfer the usbtest test suite will run for days
1343  * without the kludge.
1344  * With other bus activity (e.g. mass storage) even just test1 will hang without
1345  * the kludge.
1346  */
1347 			u32 dword0;
1348 			int cc;
1349 
1350 			if (etd->active_count && !enabled) /* suspicious... */
1351 				enable_sof_int = 1;
1352 
1353 			if (!sof || enabled || !etd->active_count)
1354 				continue;
1355 
1356 			cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
1357 			if (cc == TD_NOTACCESSED)
1358 				continue;
1359 
1360 			if (++etd->active_count < 10)
1361 				continue;
1362 
1363 			dword0 = etd_readl(imx21, etd_num, 0);
1364 			dev_dbg(imx21->dev,
1365 				"unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
1366 				etd_num, dword0 & 0x7F,
1367 				(dword0 >> DW0_ENDPNT) & 0x0F,
1368 				cc);
1369 
1370 #ifdef DEBUG
1371 			dev_dbg(imx21->dev,
1372 				"frame: act=%d disact=%d"
1373 				" int=%d req=%d cur=%d\n",
1374 				etd->activated_frame,
1375 				etd->disactivated_frame,
1376 				etd->last_int_frame,
1377 				etd->last_req_frame,
1378 				readl(imx21->regs + USBH_FRMNUB));
1379 			imx21->debug_unblocks++;
1380 #endif
1381 			etd->active_count = 0;
1382 /* End of kludge */
1383 		}
1384 
1385 		if (etd->ep == NULL || etd->urb == NULL) {
1386 			dev_dbg(imx21->dev,
1387 				"Interrupt for unexpected etd %d"
1388 				" ep=%p urb=%p\n",
1389 				etd_num, etd->ep, etd->urb);
1390 			disactivate_etd(imx21, etd_num);
1391 			continue;
1392 		}
1393 
1394 		if (usb_pipeisoc(etd->urb->pipe))
1395 			isoc_etd_done(hcd, etd_num);
1396 		else
1397 			nonisoc_etd_done(hcd, etd_num);
1398 	}
1399 
1400 	/* only enable SOF interrupt if it may be needed for the kludge */
1401 	if (enable_sof_int)
1402 		set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1403 	else
1404 		clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
1405 
1406 
1407 	spin_unlock_irqrestore(&imx21->lock, flags);
1408 }
1409 
1410 static irqreturn_t imx21_irq(struct usb_hcd *hcd)
1411 {
1412 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1413 	u32 ints = readl(imx21->regs + USBH_SYSISR);
1414 
1415 	if (ints & USBH_SYSIEN_HERRINT)
1416 		dev_dbg(imx21->dev, "Scheduling error\n");
1417 
1418 	if (ints & USBH_SYSIEN_SORINT)
1419 		dev_dbg(imx21->dev, "Scheduling overrun\n");
1420 
1421 	if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
1422 		process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
1423 
1424 	writel(ints, imx21->regs + USBH_SYSISR);
1425 	return IRQ_HANDLED;
1426 }
1427 
1428 static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
1429 				      struct usb_host_endpoint *ep)
1430 {
1431 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1432 	unsigned long flags;
1433 	struct ep_priv *ep_priv;
1434 	int i;
1435 
1436 	if (ep == NULL)
1437 		return;
1438 
1439 	spin_lock_irqsave(&imx21->lock, flags);
1440 	ep_priv = ep->hcpriv;
1441 	dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
1442 
1443 	if (!list_empty(&ep->urb_list))
1444 		dev_dbg(imx21->dev, "ep's URB list is not empty\n");
1445 
1446 	if (ep_priv != NULL) {
1447 		for (i = 0; i < NUM_ISO_ETDS; i++) {
1448 			if (ep_priv->etd[i] > -1)
1449 				dev_dbg(imx21->dev, "free etd %d for disable\n",
1450 					ep_priv->etd[i]);
1451 
1452 			free_etd(imx21, ep_priv->etd[i]);
1453 		}
1454 		kfree(ep_priv);
1455 		ep->hcpriv = NULL;
1456 	}
1457 
1458 	for (i = 0; i < USB_NUM_ETD; i++) {
1459 		if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
1460 			dev_err(imx21->dev,
1461 				"Active etd %d for disabled ep=%p!\n", i, ep);
1462 			free_etd(imx21, i);
1463 		}
1464 	}
1465 	free_epdmem(imx21, ep);
1466 	spin_unlock_irqrestore(&imx21->lock, flags);
1467 }
1468 
1469 /* =========================================== */
1470 /* Hub handling		 			*/
1471 /* =========================================== */
1472 
1473 static int get_hub_descriptor(struct usb_hcd *hcd,
1474 			      struct usb_hub_descriptor *desc)
1475 {
1476 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1477 	desc->bDescriptorType = USB_DT_HUB; /* HUB descriptor */
1478 	desc->bHubContrCurrent = 0;
1479 
1480 	desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
1481 		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
1482 	desc->bDescLength = 9;
1483 	desc->bPwrOn2PwrGood = 0;
1484 	desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
1485 		HUB_CHAR_NO_LPSM |	/* No power switching */
1486 		HUB_CHAR_NO_OCPM);	/* No over current protection */
1487 
1488 	desc->u.hs.DeviceRemovable[0] = 1 << 1;
1489 	desc->u.hs.DeviceRemovable[1] = ~0;
1490 	return 0;
1491 }
1492 
1493 static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
1494 {
1495 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1496 	int ports;
1497 	int changed = 0;
1498 	int i;
1499 	unsigned long flags;
1500 
1501 	spin_lock_irqsave(&imx21->lock, flags);
1502 	ports = readl(imx21->regs + USBH_ROOTHUBA)
1503 		& USBH_ROOTHUBA_NDNSTMPRT_MASK;
1504 	if (ports > 7) {
1505 		ports = 7;
1506 		dev_err(imx21->dev, "ports %d > 7\n", ports);
1507 	}
1508 	for (i = 0; i < ports; i++) {
1509 		if (readl(imx21->regs + USBH_PORTSTAT(i)) &
1510 			(USBH_PORTSTAT_CONNECTSC |
1511 			USBH_PORTSTAT_PRTENBLSC |
1512 			USBH_PORTSTAT_PRTSTATSC |
1513 			USBH_PORTSTAT_OVRCURIC |
1514 			USBH_PORTSTAT_PRTRSTSC)) {
1515 
1516 			changed = 1;
1517 			buf[0] |= 1 << (i + 1);
1518 		}
1519 	}
1520 	spin_unlock_irqrestore(&imx21->lock, flags);
1521 
1522 	if (changed)
1523 		dev_info(imx21->dev, "Hub status changed\n");
1524 	return changed;
1525 }
1526 
1527 static int imx21_hc_hub_control(struct usb_hcd *hcd,
1528 				u16 typeReq,
1529 				u16 wValue, u16 wIndex, char *buf, u16 wLength)
1530 {
1531 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1532 	int rc = 0;
1533 	u32 status_write = 0;
1534 
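	/*
	 * Most port feature requests below just select a single bit; any
	 * non-zero status_write is written to USBH_PORTSTAT(wIndex - 1)
	 * once, after the switch.
	 */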
1535 	switch (typeReq) {
1536 	case ClearHubFeature:
1537 		dev_dbg(imx21->dev, "ClearHubFeature\n");
1538 		switch (wValue) {
1539 		case C_HUB_OVER_CURRENT:
1540 			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
1541 			break;
1542 		case C_HUB_LOCAL_POWER:
1543 			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
1544 			break;
1545 		default:
1546 			dev_dbg(imx21->dev, "    unknown\n");
1547 			rc = -EINVAL;
1548 			break;
1549 		}
1550 		break;
1551 
1552 	case ClearPortFeature:
1553 		dev_dbg(imx21->dev, "ClearPortFeature\n");
1554 		switch (wValue) {
1555 		case USB_PORT_FEAT_ENABLE:
1556 			dev_dbg(imx21->dev, "    ENABLE\n");
1557 			status_write = USBH_PORTSTAT_CURCONST;
1558 			break;
1559 		case USB_PORT_FEAT_SUSPEND:
1560 			dev_dbg(imx21->dev, "    SUSPEND\n");
1561 			status_write = USBH_PORTSTAT_PRTOVRCURI;
1562 			break;
1563 		case USB_PORT_FEAT_POWER:
1564 			dev_dbg(imx21->dev, "    POWER\n");
1565 			status_write = USBH_PORTSTAT_LSDEVCON;
1566 			break;
1567 		case USB_PORT_FEAT_C_ENABLE:
1568 			dev_dbg(imx21->dev, "    C_ENABLE\n");
1569 			status_write = USBH_PORTSTAT_PRTENBLSC;
1570 			break;
1571 		case USB_PORT_FEAT_C_SUSPEND:
1572 			dev_dbg(imx21->dev, "    C_SUSPEND\n");
1573 			status_write = USBH_PORTSTAT_PRTSTATSC;
1574 			break;
1575 		case USB_PORT_FEAT_C_CONNECTION:
1576 			dev_dbg(imx21->dev, "    C_CONNECTION\n");
1577 			status_write = USBH_PORTSTAT_CONNECTSC;
1578 			break;
1579 		case USB_PORT_FEAT_C_OVER_CURRENT:
1580 			dev_dbg(imx21->dev, "    C_OVER_CURRENT\n");
1581 			status_write = USBH_PORTSTAT_OVRCURIC;
1582 			break;
1583 		case USB_PORT_FEAT_C_RESET:
1584 			dev_dbg(imx21->dev, "    C_RESET\n");
1585 			status_write = USBH_PORTSTAT_PRTRSTSC;
1586 			break;
1587 		default:
1588 			dev_dbg(imx21->dev, "    unknown\n");
1589 			rc = -EINVAL;
1590 			break;
1591 		}
1592 
1593 		break;
1594 
1595 	case GetHubDescriptor:
1596 		dev_dbg(imx21->dev, "GetHubDescriptor\n");
1597 		rc = get_hub_descriptor(hcd, (void *)buf);
1598 		break;
1599 
1600 	case GetHubStatus:
1601 		dev_dbg(imx21->dev, "  GetHubStatus\n");
1602 		*(__le32 *) buf = 0;
1603 		break;
1604 
1605 	case GetPortStatus:
1606 		dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
1607 		    wIndex, USBH_PORTSTAT(wIndex - 1));
1608 		*(__le32 *) buf = readl(imx21->regs +
1609 			USBH_PORTSTAT(wIndex - 1));
1610 		break;
1611 
1612 	case SetHubFeature:
1613 		dev_dbg(imx21->dev, "SetHubFeature\n");
1614 		switch (wValue) {
1615 		case C_HUB_OVER_CURRENT:
1616 			dev_dbg(imx21->dev, "    OVER_CURRENT\n");
1617 			break;
1618 
1619 		case C_HUB_LOCAL_POWER:
1620 			dev_dbg(imx21->dev, "    LOCAL_POWER\n");
1621 			break;
1622 		default:
1623 			dev_dbg(imx21->dev, "    unknown\n");
1624 			rc = -EINVAL;
1625 			break;
1626 		}
1627 
1628 		break;
1629 
1630 	case SetPortFeature:
1631 		dev_dbg(imx21->dev, "SetPortFeature\n");
1632 		switch (wValue) {
1633 		case USB_PORT_FEAT_SUSPEND:
1634 			dev_dbg(imx21->dev, "    SUSPEND\n");
1635 			status_write = USBH_PORTSTAT_PRTSUSPST;
1636 			break;
1637 		case USB_PORT_FEAT_POWER:
1638 			dev_dbg(imx21->dev, "    POWER\n");
1639 			status_write = USBH_PORTSTAT_PRTPWRST;
1640 			break;
1641 		case USB_PORT_FEAT_RESET:
1642 			dev_dbg(imx21->dev, "    RESET\n");
1643 			status_write = USBH_PORTSTAT_PRTRSTST;
1644 			break;
1645 		default:
1646 			dev_dbg(imx21->dev, "    unknown\n");
1647 			rc = -EINVAL;
1648 			break;
1649 		}
1650 		break;
1651 
1652 	default:
1653 		dev_dbg(imx21->dev, "  unknown\n");
1654 		rc = -EINVAL;
1655 		break;
1656 	}
1657 
1658 	if (status_write)
1659 		writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
1660 	return rc;
1661 }
1662 
1663 /* =========================================== */
1664 /* Host controller management 			*/
1665 /* =========================================== */
1666 
1667 static int imx21_hc_reset(struct usb_hcd *hcd)
1668 {
1669 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1670 	unsigned long timeout;
1671 	unsigned long flags;
1672 
1673 	spin_lock_irqsave(&imx21->lock, flags);
1674 
1675 	/* Reset the Host controller modules */
1676 	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
1677 		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
1678 		imx21->regs + USBOTG_RST_CTRL);
1679 
1680 	/* Wait for reset to finish */
1681 	timeout = jiffies + HZ;
1682 	while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
1683 		if (time_after(jiffies, timeout)) {
1684 			spin_unlock_irqrestore(&imx21->lock, flags);
1685 			dev_err(imx21->dev, "timeout waiting for reset\n");
1686 			return -ETIMEDOUT;
1687 		}
1688 		spin_unlock_irq(&imx21->lock);
1689 		schedule_timeout_uninterruptible(1);
1690 		spin_lock_irq(&imx21->lock);
1691 	}
1692 	spin_unlock_irqrestore(&imx21->lock, flags);
1693 	return 0;
1694 }
1695 
1696 static int imx21_hc_start(struct usb_hcd *hcd)
1697 {
1698 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1699 	unsigned long flags;
1700 	int i, j;
1701 	u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
1702 	u32 usb_control = 0;
1703 
1704 	hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
1705 			USBOTG_HWMODE_HOSTXCVR_MASK);
1706 	hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
1707 			USBOTG_HWMODE_OTGXCVR_MASK);
1708 
1709 	if (imx21->pdata->host1_txenoe)
1710 		usb_control |= USBCTRL_HOST1_TXEN_OE;
1711 
1712 	if (!imx21->pdata->host1_xcverless)
1713 		usb_control |= USBCTRL_HOST1_BYP_TLL;
1714 
1715 	if (imx21->pdata->otg_ext_xcvr)
1716 		usb_control |= USBCTRL_OTC_RCV_RXDP;
1717 
1718 
1719 	spin_lock_irqsave(&imx21->lock, flags);
1720 
1721 	writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
1722 		imx21->regs + USBOTG_CLK_CTRL);
1723 	writel(hw_mode, imx21->regs + USBOTG_HWMODE);
1724 	writel(usb_control, imx21->regs + USBCTRL);
1725 	writel(USB_MISCCONTROL_SKPRTRY  | USB_MISCCONTROL_ARBMODE,
1726 		imx21->regs + USB_MISCCONTROL);
1727 
1728 	/* Clear the ETDs */
1729 	for (i = 0; i < USB_NUM_ETD; i++)
1730 		for (j = 0; j < 4; j++)
1731 			etd_writel(imx21, i, j, 0);
1732 
1733 	/* Take the HC out of reset */
1734 	writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
1735 		imx21->regs + USBH_HOST_CTRL);
1736 
1737 	/* Enable ports */
1738 	if (imx21->pdata->enable_otg_host)
1739 		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1740 			imx21->regs + USBH_PORTSTAT(0));
1741 
1742 	if (imx21->pdata->enable_host1)
1743 		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1744 			imx21->regs + USBH_PORTSTAT(1));
1745 
1746 	if (imx21->pdata->enable_host2)
1747 		writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
1748 			imx21->regs + USBH_PORTSTAT(2));
1749 
1750 
1751 	hcd->state = HC_STATE_RUNNING;
1752 
1753 	/* Enable host controller interrupts */
1754 	set_register_bits(imx21, USBH_SYSIEN,
1755 		USBH_SYSIEN_HERRINT |
1756 		USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
1757 	set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1758 
1759 	spin_unlock_irqrestore(&imx21->lock, flags);
1760 
1761 	return 0;
1762 }
1763 
1764 static void imx21_hc_stop(struct usb_hcd *hcd)
1765 {
1766 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1767 	unsigned long flags;
1768 
1769 	spin_lock_irqsave(&imx21->lock, flags);
1770 
1771 	writel(0, imx21->regs + USBH_SYSIEN);
1772 	clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
1773 	clear_register_bits(imx21, USBOTG_CLK_CTRL,
1774 			USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
1775 	spin_unlock_irqrestore(&imx21->lock, flags);
1776 }
1777 
1778 /* =========================================== */
1779 /* Driver glue		 			*/
1780 /* =========================================== */
1781 
1782 static const struct hc_driver imx21_hc_driver = {
1783 	.description = hcd_name,
1784 	.product_desc = "IMX21 USB Host Controller",
1785 	.hcd_priv_size = sizeof(struct imx21),
1786 
1787 	.flags = HCD_USB11,
1788 	.irq = imx21_irq,
1789 
1790 	.reset = imx21_hc_reset,
1791 	.start = imx21_hc_start,
1792 	.stop = imx21_hc_stop,
1793 
1794 	/* I/O requests */
1795 	.urb_enqueue = imx21_hc_urb_enqueue,
1796 	.urb_dequeue = imx21_hc_urb_dequeue,
1797 	.endpoint_disable = imx21_hc_endpoint_disable,
1798 
1799 	/* scheduling support */
1800 	.get_frame_number = imx21_hc_get_frame,
1801 
1802 	/* Root hub support */
1803 	.hub_status_data = imx21_hc_hub_status_data,
1804 	.hub_control = imx21_hc_hub_control,
1805 
1806 };
1807 
1808 static struct mx21_usbh_platform_data default_pdata = {
1809 	.host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1810 	.otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
1811 	.enable_host1 = 1,
1812 	.enable_host2 = 1,
1813 	.enable_otg_host = 1,
1814 
1815 };
1816 
1817 static int imx21_remove(struct platform_device *pdev)
1818 {
1819 	struct usb_hcd *hcd = platform_get_drvdata(pdev);
1820 	struct imx21 *imx21 = hcd_to_imx21(hcd);
1821 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1822 
1823 	remove_debug_files(imx21);
1824 	usb_remove_hcd(hcd);
1825 
1826 	if (res != NULL) {
1827 		clk_disable_unprepare(imx21->clk);
1828 		clk_put(imx21->clk);
1829 		iounmap(imx21->regs);
1830 		release_mem_region(res->start, resource_size(res));
1831 	}
1832 
1833 	kfree(hcd);
1834 	return 0;
1835 }
1836 
1837 
1838 static int imx21_probe(struct platform_device *pdev)
1839 {
1840 	struct usb_hcd *hcd;
1841 	struct imx21 *imx21;
1842 	struct resource *res;
1843 	int ret;
1844 	int irq;
1845 
1846 	printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
1847 
1848 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1849 	if (!res)
1850 		return -ENODEV;
1851 	irq = platform_get_irq(pdev, 0);
1852 	if (irq < 0) {
1853 		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
1854 		return irq;
1855 	}
1856 
1857 	hcd = usb_create_hcd(&imx21_hc_driver,
1858 		&pdev->dev, dev_name(&pdev->dev));
1859 	if (hcd == NULL) {
1860 		dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
1861 		    dev_name(&pdev->dev));
1862 		return -ENOMEM;
1863 	}
1864 
1865 	imx21 = hcd_to_imx21(hcd);
1866 	imx21->hcd = hcd;
1867 	imx21->dev = &pdev->dev;
1868 	imx21->pdata = dev_get_platdata(&pdev->dev);
1869 	if (!imx21->pdata)
1870 		imx21->pdata = &default_pdata;
1871 
1872 	spin_lock_init(&imx21->lock);
1873 	INIT_LIST_HEAD(&imx21->dmem_list);
1874 	INIT_LIST_HEAD(&imx21->queue_for_etd);
1875 	INIT_LIST_HEAD(&imx21->queue_for_dmem);
1876 	create_debug_files(imx21);
1877 
1878 	res = request_mem_region(res->start, resource_size(res), hcd_name);
1879 	if (!res) {
1880 		ret = -EBUSY;
1881 		goto failed_request_mem;
1882 	}
1883 
1884 	imx21->regs = ioremap(res->start, resource_size(res));
1885 	if (imx21->regs == NULL) {
1886 		dev_err(imx21->dev, "Cannot map registers\n");
1887 		ret = -ENOMEM;
1888 		goto failed_ioremap;
1889 	}
1890 
1891 	/* Enable the clock source */
1892 	imx21->clk = clk_get(imx21->dev, NULL);
1893 	if (IS_ERR(imx21->clk)) {
1894 		dev_err(imx21->dev, "no clock found\n");
1895 		ret = PTR_ERR(imx21->clk);
1896 		goto failed_clock_get;
1897 	}
1898 
1899 	ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
1900 	if (ret)
1901 		goto failed_clock_set;
1902 	ret = clk_prepare_enable(imx21->clk);
1903 	if (ret)
1904 		goto failed_clock_enable;
1905 
1906 	dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
1907 		(readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
1908 
1909 	ret = usb_add_hcd(hcd, irq, 0);
1910 	if (ret != 0) {
1911 		dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
1912 		goto failed_add_hcd;
1913 	}
1914 	device_wakeup_enable(hcd->self.controller);
1915 
1916 	return 0;
1917 
1918 failed_add_hcd:
1919 	clk_disable_unprepare(imx21->clk);
1920 failed_clock_enable:
1921 failed_clock_set:
1922 	clk_put(imx21->clk);
1923 failed_clock_get:
1924 	iounmap(imx21->regs);
1925 failed_ioremap:
1926 	release_mem_region(res->start, resource_size(res));
1927 failed_request_mem:
1928 	remove_debug_files(imx21);
1929 	usb_put_hcd(hcd);
1930 	return ret;
1931 }
1932 
1933 static struct platform_driver imx21_hcd_driver = {
1934 	.driver = {
1935 		   .name = hcd_name,
1936 		   },
1937 	.probe = imx21_probe,
1938 	.remove = imx21_remove,
1939 	.suspend = NULL,
1940 	.resume = NULL,
1941 };
1942 
1943 module_platform_driver(imx21_hcd_driver);
1944 
1945 MODULE_DESCRIPTION("i.MX21 USB Host controller");
1946 MODULE_AUTHOR("Martin Fuzzey");
1947 MODULE_LICENSE("GPL");
1948 MODULE_ALIAS("platform:imx21-hcd");
1949