/*
 * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
 * Author: Chao Xie <chao.xie@marvell.com>
 *	   Neil Zhang <zhangwm@marvell.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/platform_data/mv_usb.h>
#include <asm/unaligned.h>

#include "mv_udc.h"

#define DRIVER_DESC		"Marvell PXA USB Device Controller driver"

#define ep_dir(ep)	(((ep)->ep_num == 0) ? \
				((ep)->udc->ep0_dir) : ((ep)->direction))

/* timeout value -- usec */
#define RESET_TIMEOUT		10000
#define FLUSH_TIMEOUT		10000
#define EPSTATUS_TIMEOUT	10000
#define PRIME_TIMEOUT		10000
#define READSAFE_TIMEOUT	1000

#define LOOPS_USEC_SHIFT	1
#define LOOPS_USEC		(1 << LOOPS_USEC_SHIFT)
#define LOOPS(timeout)		((timeout) >> LOOPS_USEC_SHIFT)
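
/*
 * Each polling loop below runs LOOPS(timeout) iterations and ends every
 * iteration with udelay(LOOPS_USEC), so the usec budget is preserved:
 * e.g. LOOPS(RESET_TIMEOUT) = 10000 >> 1 = 5000 iterations of udelay(2),
 * i.e. roughly 10 ms of busy-waiting before a caller reports a timeout.
 */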

static DECLARE_COMPLETION(release_done);

static const char driver_name[] = "mv_udc";
static const char driver_desc[] = DRIVER_DESC;

static void nuke(struct mv_ep *ep, int status);
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver);

/* for endpoint 0 operations */
static const struct usb_endpoint_descriptor mv_ep0_desc = {
	.bLength =		USB_DT_ENDPOINT_SIZE,
	.bDescriptorType =	USB_DT_ENDPOINT,
	.bEndpointAddress =	0,
	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize =	EP0_MAX_PKT_SIZE,
};

static void ep0_reset(struct mv_udc *udc)
{
	struct mv_ep *ep;
	u32 epctrlx;
	int i = 0;

	/* ep0 in and out */
	for (i = 0; i < 2; i++) {
		ep = &udc->eps[i];
		ep->udc = udc;

		/* ep0 dQH */
		ep->dqh = &udc->ep_dqh[i];

		/* configure ep0 endpoint capabilities in dQH */
		ep->dqh->max_packet_length =
			(EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
			| EP_QUEUE_HEAD_IOS;

		ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE;

		epctrlx = readl(&udc->op_regs->epctrlx[0]);
		if (i) {	/* TX */
			epctrlx |= EPCTRL_TX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_TX_EP_TYPE_SHIFT);

		} else {	/* RX */
			epctrlx |= EPCTRL_RX_ENABLE
				| (USB_ENDPOINT_XFER_CONTROL
					<< EPCTRL_RX_EP_TYPE_SHIFT);
		}

		writel(epctrlx, &udc->op_regs->epctrlx[0]);
	}
}

/* protocol ep0 stall, will automatically be cleared on new transaction */
static void ep0_stall(struct mv_udc *udc)
{
	u32	epctrlx;

	/* set TX and RX to stall */
	epctrlx = readl(&udc->op_regs->epctrlx[0]);
	epctrlx |= EPCTRL_RX_EP_STALL | EPCTRL_TX_EP_STALL;
	writel(epctrlx, &udc->op_regs->epctrlx[0]);

	/* update ep0 state */
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;
}

static int process_ep_req(struct mv_udc *udc, int index,
	struct mv_req *curr_req)
{
	struct mv_dtd	*curr_dtd;
	struct mv_dqh	*curr_dqh;
	int actual, remaining_length;
	int i, direction;
	int retval = 0;
	u32 errors;
	u32 bit_pos;

	curr_dqh = &udc->ep_dqh[index];
	direction = index % 2;

	curr_dtd = curr_req->head;
	actual = curr_req->req.length;

	for (i = 0; i < curr_req->dtd_count; i++) {
		if (curr_dtd->size_ioc_sts & DTD_STATUS_ACTIVE) {
			dev_dbg(&udc->dev->dev, "%s, dTD not completed\n",
				udc->eps[index].name);
			return 1;
		}

		errors = curr_dtd->size_ioc_sts & DTD_ERROR_MASK;
		if (!errors) {
			remaining_length =
				(curr_dtd->size_ioc_sts & DTD_PACKET_SIZE)
					>> DTD_LENGTH_BIT_POS;
			actual -= remaining_length;

			if (remaining_length) {
				if (direction) {
					dev_dbg(&udc->dev->dev,
						"TX dTD remains data\n");
					retval = -EPROTO;
					break;
				} else
					break;
			}
		} else {
			dev_info(&udc->dev->dev,
				"complete_tr error: ep=%d %s: error = 0x%x\n",
				index >> 1, direction ? "SEND" : "RECV",
				errors);
			if (errors & DTD_STATUS_HALTED) {
				/* Clear the errors and Halt condition */
				curr_dqh->size_ioc_int_sts &= ~errors;
				retval = -EPIPE;
			} else if (errors & DTD_STATUS_DATA_BUFF_ERR) {
				retval = -EPROTO;
			} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
				retval = -EILSEQ;
			}
		}
		if (i != curr_req->dtd_count - 1)
			curr_dtd = (struct mv_dtd *)curr_dtd->next_dtd_virt;
	}
	if (retval)
		return retval;

	if (direction == EP_DIR_OUT)
		bit_pos = 1 << curr_req->ep->ep_num;
	else
		bit_pos = 1 << (16 + curr_req->ep->ep_num);

	while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
		if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
			while (readl(&udc->op_regs->epstatus) & bit_pos)
				udelay(1);
			break;
		}
		udelay(1);
	}

	curr_req->req.actual = actual;

	return 0;
}

/*
 * done() - retire a request; the caller holds udc->lock with irqs blocked
 * @status : request status to be set; it only takes effect while the
 * request is still in progress.
 */
static void done(struct mv_ep *ep, struct mv_req *req, int status)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	struct mv_udc *udc = NULL;
	unsigned char stopped = ep->stopped;
	struct mv_dtd *curr_td, *next_td;
	int j;

	udc = (struct mv_udc *)ep->udc;
	/* Remove the req from ep->queue */
	list_del_init(&req->queue);

	/* req.status should be set as -EINPROGRESS in ep_queue() */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	/* Free dtd for the request */
	next_td = req->head;
	for (j = 0; j < req->dtd_count; j++) {
		curr_td = next_td;
		if (j != req->dtd_count - 1)
			next_td = curr_td->next_dtd_virt;
		dma_pool_free(udc->dtd_pool, curr_td, curr_td->td_dma);
	}

	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	if (status && (status != -ESHUTDOWN))
		dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	ep->stopped = 1;

	spin_unlock(&ep->udc->lock);

	usb_gadget_giveback_request(&ep->ep, &req->req);

	spin_lock(&ep->udc->lock);
	ep->stopped = stopped;
}

static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 bit_pos, direction;
	u32 usbcmd, epstatus;
	unsigned int loops;
	int retval = 0;

	udc = ep->udc;
	direction = ep_dir(ep);
	dqh = &(udc->ep_dqh[ep->ep_num * 2 + direction]);
	bit_pos = 1 << (((direction == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* check if the pipe is empty */
	if (!list_empty(&ep->queue)) {
		struct mv_req *lastreq;
		lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
		lastreq->tail->dtd_next =
			req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;

		wmb();

		if (readl(&udc->op_regs->epprime) & bit_pos)
			goto done;

		loops = LOOPS(READSAFE_TIMEOUT);
		while (1) {
			/* start with setting the semaphores */
			usbcmd = readl(&udc->op_regs->usbcmd);
			usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
			writel(usbcmd, &udc->op_regs->usbcmd);

			/* read the endpoint status */
			epstatus = readl(&udc->op_regs->epstatus) & bit_pos;

			/*
			 * Reread the ATDTW semaphore bit to check if it is
			 * cleared.  When hardware sees a hazard, it clears
			 * the bit; otherwise it remains set and we can
			 * proceed with priming the endpoint if it is not
			 * already primed.
			 */
			if (readl(&udc->op_regs->usbcmd)
				& USBCMD_ATDTW_TRIPWIRE_SET)
				break;

			loops--;
			if (loops == 0) {
				dev_err(&udc->dev->dev,
					"Timeout for ATDTW_TRIPWIRE...\n");
				retval = -ETIME;
				goto done;
			}
			udelay(LOOPS_USEC);
		}

		/* Clear the semaphore */
		usbcmd = readl(&udc->op_regs->usbcmd);
		usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
		writel(usbcmd, &udc->op_regs->usbcmd);

		if (epstatus)
			goto done;
	}

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
				& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	/* Prime the Endpoint */
	writel(bit_pos, &udc->op_regs->epprime);

done:
	return retval;
}
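
/*
 * Why the tripwire dance in queue_dtd(): appending to a dTD list that the
 * controller may be walking races with hardware.  Setting the ATDTW
 * semaphore, sampling ENDPTSTATUS, then re-reading the semaphore detects
 * that race: hardware clears the bit when it touched the queue in between,
 * in which case the status read is retried.  Only a consistent read that
 * shows the endpoint idle lets the code fall through and prime it again.
 */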

static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
		dma_addr_t *dma, int *is_last)
{
	struct mv_dtd *dtd;
	struct mv_udc *udc;
	struct mv_dqh *dqh;
	u32 temp, mult = 0;

	/* how big will this transfer be? */
	if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) {
		dqh = req->ep->dqh;
		mult = (dqh->max_packet_length >> EP_QUEUE_HEAD_MULT_POS)
				& 0x3;
		*length = min(req->req.length - req->req.actual,
				(unsigned)(mult * req->ep->ep.maxpacket));
	} else
		*length = min(req->req.length - req->req.actual,
				(unsigned)EP_MAX_LENGTH_TRANSFER);

	udc = req->ep->udc;

	/*
	 * Be careful that no _GFP_HIGHMEM is set,
	 * or we cannot use dma_to_virt.
	 */
	dtd = dma_pool_alloc(udc->dtd_pool, GFP_ATOMIC, dma);
	if (dtd == NULL)
		return dtd;

	dtd->td_dma = *dma;
	/* initialize buffer page pointers */
	temp = (u32)(req->req.dma + req->req.actual);
	dtd->buff_ptr0 = cpu_to_le32(temp);
	temp &= ~0xFFF;
	dtd->buff_ptr1 = cpu_to_le32(temp + 0x1000);
	dtd->buff_ptr2 = cpu_to_le32(temp + 0x2000);
	dtd->buff_ptr3 = cpu_to_le32(temp + 0x3000);
	dtd->buff_ptr4 = cpu_to_le32(temp + 0x4000);

	req->req.actual += *length;

	/* zlp is needed if req->req.zero is set */
	if (req->req.zero) {
		if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0)
			*is_last = 1;
		else
			*is_last = 0;
	} else if (req->req.length == req->req.actual)
		*is_last = 1;
	else
		*is_last = 0;

	/* Fill in the transfer size; set active bit */
	temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);

	/* Enable interrupt for the last dtd of a request */
	if (*is_last && !req->req.no_interrupt)
		temp |= DTD_IOC;

	temp |= mult << 10;

	dtd->size_ioc_sts = temp;

	mb();

	return dtd;
}
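
/*
 * A dTD carries five buffer-page pointers: buff_ptr0 keeps the exact byte
 * address of the transfer's current position, while buff_ptr1..4 point at
 * the following 4 KB page boundaries.  One dTD therefore spans at most
 * five 4 KB pages, which is why build_dtd() clamps *length to
 * EP_MAX_LENGTH_TRANSFER and larger requests are split across a chain of
 * dTDs by req_to_dtd() below.
 */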

/* generate dTD linked list for a request */
static int req_to_dtd(struct mv_req *req)
{
	unsigned count;
	int is_last, is_first = 1;
	struct mv_dtd *dtd, *last_dtd = NULL;
	dma_addr_t dma;

	do {
		dtd = build_dtd(req, &count, &dma, &is_last);
		if (dtd == NULL)
			return -ENOMEM;

		if (is_first) {
			is_first = 0;
			req->head = dtd;
		} else {
			last_dtd->dtd_next = dma;
			last_dtd->next_dtd_virt = dtd;
		}
		last_dtd = dtd;
		req->dtd_count++;
	} while (!is_last);

	/* set terminate bit to 1 for the last dTD */
	dtd->dtd_next = DTD_NEXT_TERMINATE;

	req->tail = dtd;

	return 0;
}
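
/*
 * On success the request owns a consistent dTD chain: req->head is what
 * queue_dtd() hands to the dQH (or links after the previous tail),
 * req->tail->dtd_next carries DTD_NEXT_TERMINATE, and next_dtd_virt
 * mirrors the DMA links so done() can walk the chain and return each
 * descriptor to the dma_pool.
 */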

static int mv_ep_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u16 max = 0;
	u32 bit_pos, epctrlx, direction;
	const unsigned char zlt = 1;
	unsigned char ios, mult;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;

	if (!_ep || !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	direction = ep_dir(ep);
	max = usb_endpoint_maxp(desc);

	/*
	 * Disable HW zero-length termination select;
	 * the driver handles zero-length packets through req->req.zero.
	 */
	bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);

	/* Check if the Endpoint is Primed */
	if ((readl(&udc->op_regs->epprime) & bit_pos)
		|| (readl(&udc->op_regs->epstatus) & bit_pos)) {
		dev_info(&udc->dev->dev,
			"ep=%d %s: Init ERROR: ENDPTPRIME=0x%x,"
			" ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
			(unsigned)ep->ep_num, direction ? "SEND" : "RECV",
			(unsigned)readl(&udc->op_regs->epprime),
			(unsigned)readl(&udc->op_regs->epstatus),
			(unsigned)bit_pos);
		goto en_done;
	}

	/* Set the max packet length, interrupt on Setup and Mult fields */
	ios = 0;
	mult = 0;
	switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		ios = 1;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		/* Calculate transactions needed for high bandwidth iso */
		mult = usb_endpoint_maxp_mult(desc);
		/* 3 transactions at most */
		if (mult > 3)
			goto en_done;
		break;
	default:
		goto en_done;
	}

	spin_lock_irqsave(&udc->lock, flags);
	/* Get the endpoint queue head address */
	dqh = ep->dqh;
	dqh->max_packet_length = (max << EP_QUEUE_HEAD_MAX_PKT_LEN_POS)
		| (mult << EP_QUEUE_HEAD_MULT_POS)
		| (zlt ? EP_QUEUE_HEAD_ZLT_SEL : 0)
		| (ios ? EP_QUEUE_HEAD_IOS : 0);
	dqh->next_dtd_ptr = 1;
	dqh->size_ioc_int_sts = 0;

	ep->ep.maxpacket = max;
	ep->ep.desc = desc;
	ep->stopped = 0;

	/* Enable the endpoint for Rx or Tx and set the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (direction == EP_DIR_IN) {
		epctrlx &= ~EPCTRL_TX_ALL_MASK;
		epctrlx |= EPCTRL_TX_ENABLE | EPCTRL_TX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_TX_EP_TYPE_SHIFT);
	} else {
		epctrlx &= ~EPCTRL_RX_ALL_MASK;
		epctrlx |= EPCTRL_RX_ENABLE | EPCTRL_RX_DATA_TOGGLE_RST
			| ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
				<< EPCTRL_RX_EP_TYPE_SHIFT);
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/*
	 * Implement Guideline (GL# USB-7) The unused endpoint type must
	 * be programmed to bulk.
	 */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_RX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_RX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if ((epctrlx & EPCTRL_TX_ENABLE) == 0) {
		epctrlx |= (USB_ENDPOINT_XFER_BULK
				<< EPCTRL_TX_EP_TYPE_SHIFT);
		writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	}

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
en_done:
	return -EINVAL;
}

static int mv_ep_disable(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	struct mv_ep *ep;
	struct mv_dqh *dqh;
	u32 epctrlx, direction;
	unsigned long flags;

	ep = container_of(_ep, struct mv_ep, ep);
	if ((_ep == NULL) || !ep->ep.desc)
		return -EINVAL;

	udc = ep->udc;

	/* Get the endpoint queue head address */
	dqh = ep->dqh;

	spin_lock_irqsave(&udc->lock, flags);

	direction = ep_dir(ep);

	/* Reset the max packet length and the interrupt on Setup */
	dqh->max_packet_length = 0;

	/* Disable the endpoint for Rx or Tx and reset the endpoint type */
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	epctrlx &= ~((direction == EP_DIR_IN)
			? (EPCTRL_TX_ENABLE | EPCTRL_TX_TYPE)
			: (EPCTRL_RX_ENABLE | EPCTRL_RX_TYPE));
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* nuke all pending requests (does flush) */
	nuke(ep, -ESHUTDOWN);

	ep->ep.desc = NULL;
	ep->stopped = 1;

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}

static struct usb_request *
mv_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct mv_req *req = NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (!req)
		return NULL;

	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}

static void mv_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_req *req = NULL;

	req = container_of(_req, struct mv_req, req);

	if (_req)
		kfree(req);
}

static void mv_ep_fifo_flush(struct usb_ep *_ep)
{
	struct mv_udc *udc;
	u32 bit_pos, direction;
	struct mv_ep *ep;
	unsigned int loops;

	if (!_ep)
		return;

	ep = container_of(_ep, struct mv_ep, ep);
	if (!ep->ep.desc)
		return;

	udc = ep->udc;
	direction = ep_dir(ep);

	if (ep->ep_num == 0)
		bit_pos = (1 << 16) | 1;
	else if (direction == EP_DIR_OUT)
		bit_pos = 1 << ep->ep_num;
	else
		bit_pos = 1 << (16 + ep->ep_num);

	loops = LOOPS(EPSTATUS_TIMEOUT);
	do {
		unsigned int inter_loops;

		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"TIMEOUT for ENDPTSTATUS=0x%x, bit_pos=0x%x\n",
				(unsigned)readl(&udc->op_regs->epstatus),
				(unsigned)bit_pos);
			return;
		}
		/* Write 1 to the Flush register */
		writel(bit_pos, &udc->op_regs->epflush);

		/* Wait until flushing completed */
		inter_loops = LOOPS(FLUSH_TIMEOUT);
		while (readl(&udc->op_regs->epflush)) {
			/*
			 * ENDPTFLUSH bit should be cleared to indicate this
			 * operation is complete
			 */
			if (inter_loops == 0) {
				dev_err(&udc->dev->dev,
					"TIMEOUT for ENDPTFLUSH=0x%x,"
					" bit_pos=0x%x\n",
					(unsigned)readl(&udc->op_regs->epflush),
					(unsigned)bit_pos);
				return;
			}
			inter_loops--;
			udelay(LOOPS_USEC);
		}
		loops--;
	} while (readl(&udc->op_regs->epstatus) & bit_pos);
}
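
/*
 * The nested loops above follow the controller's flush protocol: writing
 * ENDPTFLUSH starts the flush and the bit self-clears on completion, but
 * a transfer that was already in flight can re-assert the ENDPTSTATUS
 * bit, so the outer loop re-checks the status and flushes again until the
 * endpoint is really quiescent (or the timeout budget is spent).
 */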

/* queues (submits) an I/O request to an endpoint */
static int
mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int retval;

	/* catch various bogus parameters */
	if (!_req || !req->req.complete || !req->req.buf
			|| !list_empty(&req->queue)) {
		dev_err(&udc->dev->dev, "%s, bad params", __func__);
		return -EINVAL;
	}
	if (unlikely(!_ep || !ep->ep.desc)) {
		dev_err(&udc->dev->dev, "%s, bad ep", __func__);
		return -EINVAL;
	}

	udc = ep->udc;
	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	req->ep = ep;

	/* map virtual address to hardware */
	retval = usb_gadget_map_request(&udc->gadget, _req, ep_dir(ep));
	if (retval)
		return retval;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	req->dtd_count = 0;

	spin_lock_irqsave(&udc->lock, flags);

	/* build dtds and push them to device queue */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			spin_unlock_irqrestore(&udc->lock, flags);
			dev_err(&udc->dev->dev, "Failed to queue dtd\n");
			goto err_unmap_dma;
		}
	} else {
		spin_unlock_irqrestore(&udc->lock, flags);
		dev_err(&udc->dev->dev, "Failed to dma_pool_alloc\n");
		retval = -ENOMEM;
		goto err_unmap_dma;
	}

	/* Update ep0 state */
	if (ep->ep_num == 0)
		udc->ep0_state = DATA_STATE_XMIT;

	/* irq handler advances the queue */
	list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;

err_unmap_dma:
	usb_gadget_unmap_request(&udc->gadget, _req, ep_dir(ep));

	return retval;
}

static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
{
	struct mv_dqh *dqh = ep->dqh;
	u32 bit_pos;

	/* Write dQH next pointer and terminate bit to 0 */
	dqh->next_dtd_ptr = req->head->td_dma
		& EP_QUEUE_HEAD_NEXT_POINTER_MASK;

	/* clear active and halt bit, in case set from a previous error */
	dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);

	/* Ensure that updates to the QH will occur before priming. */
	wmb();

	bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);

	/* Prime the Endpoint */
	writel(bit_pos, &ep->udc->op_regs->epprime);
}

/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct mv_ep *ep = container_of(_ep, struct mv_ep, ep);
	struct mv_req *req;
	struct mv_udc *udc = ep->udc;
	unsigned long flags;
	int stopped, ret = 0;
	u32 epctrlx;

	if (!_ep || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->udc->lock, flags);
	stopped = ep->stopped;

	/* Stop the ep before we deal with the queue */
	ep->stopped = 1;
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx &= ~EPCTRL_TX_ENABLE;
	else
		epctrlx &= ~EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	/* The request is in progress, or completed but not dequeued */
	if (ep->queue.next == &req->queue) {
		_req->status = -ECONNRESET;
		mv_ep_fifo_flush(_ep);	/* flush current transfer */

		/* The request isn't the last request in this ep queue */
		if (req->queue.next != &ep->queue) {
			struct mv_req *next_req;

			next_req = list_entry(req->queue.next,
				struct mv_req, queue);

			/* Point the QH to the first TD of next request */
			mv_prime_ep(ep, next_req);
		} else {
			struct mv_dqh *qh;

			qh = ep->dqh;
			qh->next_dtd_ptr = 1;
			qh->size_ioc_int_sts = 0;
		}

		/* The request hasn't been processed, patch up the TD chain */
	} else {
		struct mv_req *prev_req;

		prev_req = list_entry(req->queue.prev, struct mv_req, queue);
		writel(readl(&req->tail->dtd_next),
				&prev_req->tail->dtd_next);
	}

	done(ep, req, -ECONNRESET);

	/* Enable EP */
out:
	epctrlx = readl(&udc->op_regs->epctrlx[ep->ep_num]);
	if (ep_dir(ep) == EP_DIR_IN)
		epctrlx |= EPCTRL_TX_ENABLE;
	else
		epctrlx |= EPCTRL_RX_ENABLE;
	writel(epctrlx, &udc->op_regs->epctrlx[ep->ep_num]);
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->udc->lock, flags);
	return ret;
}

static void ep_set_stall(struct mv_udc *udc, u8 ep_num, u8 direction, int stall)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (stall) {
		if (direction == EP_DIR_IN)
			epctrlx |= EPCTRL_TX_EP_STALL;
		else
			epctrlx |= EPCTRL_RX_EP_STALL;
	} else {
		if (direction == EP_DIR_IN) {
			epctrlx &= ~EPCTRL_TX_EP_STALL;
			epctrlx |= EPCTRL_TX_DATA_TOGGLE_RST;
		} else {
			epctrlx &= ~EPCTRL_RX_EP_STALL;
			epctrlx |= EPCTRL_RX_DATA_TOGGLE_RST;
		}
	}
	writel(epctrlx, &udc->op_regs->epctrlx[ep_num]);
}

static int ep_is_stall(struct mv_udc *udc, u8 ep_num, u8 direction)
{
	u32 epctrlx;

	epctrlx = readl(&udc->op_regs->epctrlx[ep_num]);

	if (direction == EP_DIR_OUT)
		return (epctrlx & EPCTRL_RX_EP_STALL) ? 1 : 0;
	else
		return (epctrlx & EPCTRL_TX_EP_STALL) ? 1 : 0;
}

static int mv_ep_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	struct mv_ep *ep;
	unsigned long flags = 0;
	int status = 0;
	struct mv_udc *udc;

	ep = container_of(_ep, struct mv_ep, ep);
	udc = ep->udc;
	if (!_ep || !ep->ep.desc) {
		status = -EINVAL;
		goto out;
	}

	if (ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
		status = -EOPNOTSUPP;
		goto out;
	}

	/*
	 * An attempt to halt an IN ep will fail if any transfer
	 * requests are still queued.
	 */
	if (halt && (ep_dir(ep) == EP_DIR_IN) && !list_empty(&ep->queue)) {
		status = -EAGAIN;
		goto out;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);
	ep_set_stall(udc, ep->ep_num, ep_dir(ep), halt);
	if (halt && wedge)
		ep->wedge = 1;
	else if (!halt)
		ep->wedge = 0;
	spin_unlock_irqrestore(&ep->udc->lock, flags);

	if (ep->ep_num == 0) {
		udc->ep0_state = WAIT_FOR_SETUP;
		udc->ep0_dir = EP_DIR_OUT;
	}
out:
	return status;
}

static int mv_ep_set_halt(struct usb_ep *_ep, int halt)
{
	return mv_ep_set_halt_wedge(_ep, halt, 0);
}

static int mv_ep_set_wedge(struct usb_ep *_ep)
{
	return mv_ep_set_halt_wedge(_ep, 1, 1);
}

static const struct usb_ep_ops mv_ep_ops = {
	.enable		= mv_ep_enable,
	.disable	= mv_ep_disable,

	.alloc_request	= mv_alloc_request,
	.free_request	= mv_free_request,

	.queue		= mv_ep_queue,
	.dequeue	= mv_ep_dequeue,

	.set_wedge	= mv_ep_set_wedge,
	.set_halt	= mv_ep_set_halt,
	.fifo_flush	= mv_ep_fifo_flush,	/* flush fifo */
};
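
/*
 * Gadget drivers never call these ops directly; the gadget core reaches
 * them through the usb_ep wrappers.  A minimal caller-side sketch (the
 * buffer and the my_complete callback are hypothetical, not part of this
 * driver):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	status = usb_ep_queue(ep, req, GFP_KERNEL);	// -> mv_ep_queue()
 */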

static int udc_clock_enable(struct mv_udc *udc)
{
	return clk_prepare_enable(udc->clk);
}

static void udc_clock_disable(struct mv_udc *udc)
{
	clk_disable_unprepare(udc->clk);
}

static void udc_stop(struct mv_udc *udc)
{
	u32 tmp;

	/* Disable interrupts */
	tmp = readl(&udc->op_regs->usbintr);
	tmp &= ~(USBINTR_INT_EN | USBINTR_ERR_INT_EN |
		USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
	writel(tmp, &udc->op_regs->usbintr);

	udc->stopped = 1;

	/* Reset the Run bit in the command register to stop VUSB */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);
}

static void udc_start(struct mv_udc *udc)
{
	u32 usbintr;

	usbintr = USBINTR_INT_EN | USBINTR_ERR_INT_EN
		| USBINTR_PORT_CHANGE_DETECT_EN
		| USBINTR_RESET_EN | USBINTR_DEVICE_SUSPEND;
	/* Enable interrupts */
	writel(usbintr, &udc->op_regs->usbintr);

	udc->stopped = 0;

	/* Set the Run bit in the command register */
	writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}

static int udc_reset(struct mv_udc *udc)
{
	unsigned int loops;
	u32 tmp, portsc;

	/* Stop the controller */
	tmp = readl(&udc->op_regs->usbcmd);
	tmp &= ~USBCMD_RUN_STOP;
	writel(tmp, &udc->op_regs->usbcmd);

	/* Reset the controller to get default values */
	writel(USBCMD_CTRL_RESET, &udc->op_regs->usbcmd);

	/* wait for reset to complete */
	loops = LOOPS(RESET_TIMEOUT);
	while (readl(&udc->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
		if (loops == 0) {
			dev_err(&udc->dev->dev,
				"Wait for RESET completed TIMEOUT\n");
			return -ETIMEDOUT;
		}
		loops--;
		udelay(LOOPS_USEC);
	}

	/* set controller to device mode */
	tmp = readl(&udc->op_regs->usbmode);
	tmp |= USBMODE_CTRL_MODE_DEVICE;

	/* turn setup lockout off, require setup tripwire in usbcmd */
	tmp |= USBMODE_SETUP_LOCK_OFF;

	writel(tmp, &udc->op_regs->usbmode);

	writel(0x0, &udc->op_regs->epsetupstat);

	/* Configure the Endpoint List Address */
	writel(udc->ep_dqh_dma & USB_EP_LIST_ADDRESS_MASK,
		&udc->op_regs->eplistaddr);

	portsc = readl(&udc->op_regs->portsc[0]);
	if (readl(&udc->cap_regs->hcsparams) & HCSPARAMS_PPC)
		portsc &= ~(PORTSCX_W1C_BITS | PORTSCX_PORT_POWER);

	if (udc->force_fs)
		portsc |= PORTSCX_FORCE_FULL_SPEED_CONNECT;
	else
		portsc &= ~PORTSCX_FORCE_FULL_SPEED_CONNECT;

	writel(portsc, &udc->op_regs->portsc[0]);

	tmp = readl(&udc->op_regs->epctrlx[0]);
	tmp &= ~(EPCTRL_TX_EP_STALL | EPCTRL_RX_EP_STALL);
	writel(tmp, &udc->op_regs->epctrlx[0]);

	return 0;
}

static int mv_udc_enable_internal(struct mv_udc *udc)
{
	int retval;

	if (udc->active)
		return 0;

	dev_dbg(&udc->dev->dev, "enable udc\n");
	retval = udc_clock_enable(udc);
	if (retval)
		return retval;

	if (udc->pdata->phy_init) {
		retval = udc->pdata->phy_init(udc->phy_regs);
		if (retval) {
			dev_err(&udc->dev->dev,
				"init phy error %d\n", retval);
			udc_clock_disable(udc);
			return retval;
		}
	}
	udc->active = 1;

	return 0;
}

static int mv_udc_enable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		return mv_udc_enable_internal(udc);

	return 0;
}

static void mv_udc_disable_internal(struct mv_udc *udc)
{
	if (udc->active) {
		dev_dbg(&udc->dev->dev, "disable udc\n");
		if (udc->pdata->phy_deinit)
			udc->pdata->phy_deinit(udc->phy_regs);
		udc_clock_disable(udc);
		udc->active = 0;
	}
}

static void mv_udc_disable(struct mv_udc *udc)
{
	if (udc->clock_gating)
		mv_udc_disable_internal(udc);
}

static int mv_udc_get_frame(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	u16	retval;

	if (!gadget)
		return -ENODEV;

	udc = container_of(gadget, struct mv_udc, gadget);

	retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;

	return retval;
}

/* Tries to wake up the host connected to this gadget */
static int mv_udc_wakeup(struct usb_gadget *gadget)
{
	struct mv_udc *udc = container_of(gadget, struct mv_udc, gadget);
	u32 portsc;

	/* Remote wakeup feature not enabled by host */
	if (!udc->remote_wakeup)
		return -ENOTSUPP;

	portsc = readl(&udc->op_regs->portsc);
	/* not suspended? */
	if (!(portsc & PORTSCX_PORT_SUSPEND))
		return 0;
	/* trigger force resume */
	portsc |= PORTSCX_PORT_FORCE_RESUME;
	writel(portsc, &udc->op_regs->portsc[0]);
	return 0;
}

static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->vbus_active = (is_active != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
		__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need to re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->softconnect) {
		if (!udc->active)
			goto out;

		/* stop all the transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

out:
	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
{
	struct mv_udc *udc;
	unsigned long flags;
	int retval = 0;

	udc = container_of(gadget, struct mv_udc, gadget);
	spin_lock_irqsave(&udc->lock, flags);

	udc->softconnect = (is_on != 0);

	dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
			__func__, udc->softconnect, udc->vbus_active);

	if (udc->driver && udc->softconnect && udc->vbus_active) {
		retval = mv_udc_enable(udc);
		if (retval == 0) {
			/* Clock is disabled, need to re-init registers */
			udc_reset(udc);
			ep0_reset(udc);
			udc_start(udc);
		}
	} else if (udc->driver && udc->vbus_active) {
		/* stop all the transfers in the queue */
		stop_activity(udc, udc->driver);
		udc_stop(udc);
		mv_udc_disable(udc);
	}

	spin_unlock_irqrestore(&udc->lock, flags);
	return retval;
}

static int mv_udc_start(struct usb_gadget *, struct usb_gadget_driver *);
static int mv_udc_stop(struct usb_gadget *);
/* device controller usb_gadget_ops structure */
static const struct usb_gadget_ops mv_ops = {

	/* returns the current frame number */
	.get_frame	= mv_udc_get_frame,

	/* tries to wake up the host connected to this gadget */
	.wakeup		= mv_udc_wakeup,

	/* notify controller that VBUS is powered or not */
	.vbus_session	= mv_udc_vbus_session,

	/* D+ pullup, software-controlled connect/disconnect to USB host */
	.pullup		= mv_udc_pullup,
	.udc_start	= mv_udc_start,
	.udc_stop	= mv_udc_stop,
};

static int eps_init(struct mv_udc *udc)
{
	struct mv_ep	*ep;
	char name[14];
	int i;

	/* initialize ep0 */
	ep = &udc->eps[0];
	ep->udc = udc;
	strncpy(ep->name, "ep0", sizeof(ep->name));
	ep->ep.name = ep->name;
	ep->ep.ops = &mv_ep_ops;
	ep->wedge = 0;
	ep->stopped = 0;
	usb_ep_set_maxpacket_limit(&ep->ep, EP0_MAX_PKT_SIZE);
	ep->ep.caps.type_control = true;
	ep->ep.caps.dir_in = true;
	ep->ep.caps.dir_out = true;
	ep->ep_num = 0;
	ep->ep.desc = &mv_ep0_desc;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep_type = USB_ENDPOINT_XFER_CONTROL;

	/* initialize other endpoints */
	for (i = 2; i < udc->max_eps * 2; i++) {
		ep = &udc->eps[i];
		if (i % 2) {
			snprintf(name, sizeof(name), "ep%din", i / 2);
			ep->direction = EP_DIR_IN;
			ep->ep.caps.dir_in = true;
		} else {
			snprintf(name, sizeof(name), "ep%dout", i / 2);
			ep->direction = EP_DIR_OUT;
			ep->ep.caps.dir_out = true;
		}
		ep->udc = udc;
		strncpy(ep->name, name, sizeof(ep->name));
		ep->ep.name = ep->name;

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;

		ep->ep.ops = &mv_ep_ops;
		ep->stopped = 0;
		usb_ep_set_maxpacket_limit(&ep->ep, (unsigned short) ~0);
		ep->ep_num = i / 2;

		INIT_LIST_HEAD(&ep->queue);
		list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);

		ep->dqh = &udc->ep_dqh[i];
	}

	return 0;
}

/* delete all endpoint requests, called with spinlock held */
static void nuke(struct mv_ep *ep, int status)
{
	/* called with spinlock held */
	ep->stopped = 1;

	/* endpoint fifo flush */
	mv_ep_fifo_flush(&ep->ep);

	while (!list_empty(&ep->queue)) {
		struct mv_req *req = NULL;
		req = list_entry(ep->queue.next, struct mv_req, queue);
		done(ep, req, status);
	}
}

static void gadget_reset(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report reset; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		usb_gadget_udc_reset(&udc->gadget, driver);
		spin_lock(&udc->lock);
	}
}

/* stop all USB activities */
static void stop_activity(struct mv_udc *udc, struct usb_gadget_driver *driver)
{
	struct mv_ep	*ep;

	nuke(&udc->eps[0], -ESHUTDOWN);

	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		nuke(ep, -ESHUTDOWN);
	}

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&udc->lock);
		driver->disconnect(&udc->gadget);
		spin_lock(&udc->lock);
	}
}

static int mv_udc_start(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct mv_udc *udc;
	int retval = 0;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	if (udc->driver)
		return -EBUSY;

	spin_lock_irqsave(&udc->lock, flags);

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	udc->driver = driver;

	udc->usb_state = USB_STATE_ATTACHED;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->ep0_dir = EP_DIR_OUT;

	spin_unlock_irqrestore(&udc->lock, flags);

	if (udc->transceiver) {
		retval = otg_set_peripheral(udc->transceiver->otg,
					&udc->gadget);
		if (retval) {
			dev_err(&udc->dev->dev,
				"unable to register peripheral to otg\n");
			udc->driver = NULL;
			return retval;
		}
	}

	/* When booting with the cable already attached, no vbus irq will occur */
	if (udc->qwork)
		queue_work(udc->qwork, &udc->vbus_work);

	return 0;
}

static int mv_udc_stop(struct usb_gadget *gadget)
{
	struct mv_udc *udc;
	unsigned long flags;

	udc = container_of(gadget, struct mv_udc, gadget);

	spin_lock_irqsave(&udc->lock, flags);

	mv_udc_enable(udc);
	udc_stop(udc);

	/* stop all usb activities */
	udc->gadget.speed = USB_SPEED_UNKNOWN;
	stop_activity(udc, NULL);
	mv_udc_disable(udc);

	spin_unlock_irqrestore(&udc->lock, flags);

	/* unbind gadget driver */
	udc->driver = NULL;

	return 0;
}

static void mv_set_ptc(struct mv_udc *udc, u32 mode)
{
	u32 portsc;

	portsc = readl(&udc->op_regs->portsc[0]);
	portsc |= mode << 16;
	writel(portsc, &udc->op_regs->portsc[0]);
}
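
/*
 * mv_set_ptc() shifts the selected test mode into the PORTSCx port test
 * control field (bits 16-19 in this EHCI-style register layout).  It is
 * deliberately invoked from prime_status_complete() below, i.e. only
 * after the status stage of the SET_FEATURE(TEST_MODE) request has gone
 * out, as the USB 2.0 test-mode rules require.
 */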

static void prime_status_complete(struct usb_ep *ep, struct usb_request *_req)
{
	struct mv_ep *mvep = container_of(ep, struct mv_ep, ep);
	struct mv_req *req = container_of(_req, struct mv_req, req);
	struct mv_udc *udc;
	unsigned long flags;

	udc = mvep->udc;

	dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode);

	spin_lock_irqsave(&udc->lock, flags);
	if (req->test_mode) {
		mv_set_ptc(udc, req->test_mode);
		req->test_mode = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);
}

static int
udc_prime_status(struct mv_udc *udc, u8 direction, u16 status, bool empty)
{
	int retval = 0;
	struct mv_req *req;
	struct mv_ep *ep;

	ep = &udc->eps[0];
	udc->ep0_dir = direction;
	udc->ep0_state = WAIT_FOR_OUT_STATUS;

	req = udc->status_req;

	/* fill in the request structure */
	if (empty == false) {
		*((u16 *) req->req.buf) = cpu_to_le16(status);
		req->req.length = 2;
	} else
		req->req.length = 0;

	req->ep = ep;
	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	if (udc->test_mode) {
		req->req.complete = prime_status_complete;
		req->test_mode = udc->test_mode;
		udc->test_mode = 0;
	} else
		req->req.complete = NULL;
	req->dtd_count = 0;

	if (req->req.dma == DMA_ADDR_INVALID) {
		req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
				req->req.buf, req->req.length,
				ep_dir(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	/* prime the data phase */
	if (!req_to_dtd(req)) {
		retval = queue_dtd(ep, req);
		if (retval) {
			dev_err(&udc->dev->dev,
				"Failed to queue dtd when prime status\n");
			goto out;
		}
	} else {	/* no mem */
		retval = -ENOMEM;
		dev_err(&udc->dev->dev,
			"Failed to dma_pool_alloc when prime status\n");
		goto out;
	}

	list_add_tail(&req->queue, &ep->queue);

	return 0;
out:
	usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));

	return retval;
}

static void mv_udc_testmode(struct mv_udc *udc, u16 index)
{
	if (index <= TEST_FORCE_EN) {
		udc->test_mode = index;
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
	} else
		dev_err(&udc->dev->dev,
			"This test mode(%d) is not supported\n", index);
}

static void ch9setaddress(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	udc->dev_addr = (u8)setup->wValue;

	/* update usb state */
	udc->usb_state = USB_STATE_ADDRESS;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
}

static void ch9getstatus(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
{
	u16 status = 0;
	int retval;

	if ((setup->bRequestType & (USB_DIR_IN | USB_TYPE_MASK))
		!= (USB_DIR_IN | USB_TYPE_STANDARD))
		return;

	if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
		status = 1 << USB_DEVICE_SELF_POWERED;
		status |= udc->remote_wakeup << USB_DEVICE_REMOTE_WAKEUP;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_INTERFACE) {
		/* get interface status */
		status = 0;
	} else if ((setup->bRequestType & USB_RECIP_MASK)
			== USB_RECIP_ENDPOINT) {
		u8 ep_num, direction;

		ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
		direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
		status = ep_is_stall(udc, ep_num, direction)
				<< USB_ENDPOINT_HALT;
	}

	retval = udc_prime_status(udc, EP_DIR_IN, status, false);
	if (retval)
		ep0_stall(udc);
	else
		udc->ep0_state = DATA_STATE_XMIT;
}

static void ch9clearfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;
	struct mv_ep *ep;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 0;
			break;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			ep = &udc->eps[ep_num * 2 + direction];
			if (ep->wedge == 1)
				break;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 0);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

static void ch9setfeature(struct mv_udc *udc, struct usb_ctrlrequest *setup)
{
	u8 ep_num;
	u8 direction;

	if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) {
		switch (setup->wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			udc->remote_wakeup = 1;
			break;
		case USB_DEVICE_TEST_MODE:
			if (setup->wIndex & 0xFF
				|| udc->gadget.speed != USB_SPEED_HIGH)
				ep0_stall(udc);

			if (udc->usb_state != USB_STATE_CONFIGURED
				&& udc->usb_state != USB_STATE_ADDRESS
				&& udc->usb_state != USB_STATE_DEFAULT)
				ep0_stall(udc);

			mv_udc_testmode(udc, (setup->wIndex >> 8));
			goto out;
		default:
			goto out;
		}
	} else if ((setup->bRequestType & (USB_TYPE_MASK | USB_RECIP_MASK))
		== (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)) {
		switch (setup->wValue) {
		case USB_ENDPOINT_HALT:
			ep_num = setup->wIndex & USB_ENDPOINT_NUMBER_MASK;
			direction = (setup->wIndex & USB_ENDPOINT_DIR_MASK)
				? EP_DIR_IN : EP_DIR_OUT;
			if (setup->wValue != 0 || setup->wLength != 0
				|| ep_num > udc->max_eps)
				goto out;
			spin_unlock(&udc->lock);
			ep_set_stall(udc, ep_num, direction, 1);
			spin_lock(&udc->lock);
			break;
		default:
			goto out;
		}
	} else
		goto out;

	if (udc_prime_status(udc, EP_DIR_IN, 0, true))
		ep0_stall(udc);
out:
	return;
}

static void handle_setup_packet(struct mv_udc *udc, u8 ep_num,
	struct usb_ctrlrequest *setup)
	__releases(&ep->udc->lock)
	__acquires(&ep->udc->lock)
{
	bool delegate = false;

	nuke(&udc->eps[ep_num * 2 + EP_DIR_OUT], -ESHUTDOWN);

	dev_dbg(&udc->dev->dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
			setup->bRequestType, setup->bRequest,
			setup->wValue, setup->wIndex, setup->wLength);
	/* We process some standard setup requests here */
	if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (setup->bRequest) {
		case USB_REQ_GET_STATUS:
			ch9getstatus(udc, ep_num, setup);
			break;

		case USB_REQ_SET_ADDRESS:
			ch9setaddress(udc, setup);
			break;

		case USB_REQ_CLEAR_FEATURE:
			ch9clearfeature(udc, setup);
			break;

		case USB_REQ_SET_FEATURE:
			ch9setfeature(udc, setup);
			break;

		default:
			delegate = true;
		}
	} else
		delegate = true;

	/* delegate USB standard requests to the gadget driver */
	if (delegate == true) {
		/* USB requests handled by gadget */
		if (setup->wLength) {
			/* DATA phase from gadget, STATUS phase from udc */
			udc->ep0_dir = (setup->bRequestType & USB_DIR_IN)
					? EP_DIR_IN : EP_DIR_OUT;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = (setup->bRequestType & USB_DIR_IN)
					? DATA_STATE_XMIT : DATA_STATE_RECV;
		} else {
			/* no DATA phase, IN STATUS phase from gadget */
			udc->ep0_dir = EP_DIR_IN;
			spin_unlock(&udc->lock);
			if (udc->driver->setup(&udc->gadget,
				&udc->local_setup_buff) < 0)
				ep0_stall(udc);
			spin_lock(&udc->lock);
			udc->ep0_state = WAIT_FOR_OUT_STATUS;
		}
	}
}

/* complete the DATA or STATUS phase of ep0; prime the status phase if needed */
static void ep0_req_complete(struct mv_udc *udc,
	struct mv_ep *ep0, struct mv_req *req)
{
	u32 new_addr;

	if (udc->usb_state == USB_STATE_ADDRESS) {
		/* set the new address */
		new_addr = (u32)udc->dev_addr;
		writel(new_addr << USB_DEVICE_ADDRESS_BIT_SHIFT,
			&udc->op_regs->deviceaddr);
	}

	done(ep0, req, 0);

	switch (udc->ep0_state) {
	case DATA_STATE_XMIT:
		/* receive status phase */
		if (udc_prime_status(udc, EP_DIR_OUT, 0, true))
			ep0_stall(udc);
		break;
	case DATA_STATE_RECV:
		/* send status phase */
		if (udc_prime_status(udc, EP_DIR_IN, 0, true))
			ep0_stall(udc);
		break;
	case WAIT_FOR_OUT_STATUS:
		udc->ep0_state = WAIT_FOR_SETUP;
		break;
	case WAIT_FOR_SETUP:
		dev_err(&udc->dev->dev, "unexpected ep0 packets\n");
		break;
	default:
		ep0_stall(udc);
		break;
	}
}

static void get_setup_data(struct mv_udc *udc, u8 ep_num, u8 *buffer_ptr)
{
	u32 temp;
	struct mv_dqh *dqh;

	dqh = &udc->ep_dqh[ep_num * 2 + EP_DIR_OUT];

	/* Clear bit in ENDPTSETUPSTAT */
	writel((1 << ep_num), &udc->op_regs->epsetupstat);

	/* a hazard exists if a setup packet arrives during the copy; retry */
	do {
		/* Set Setup Tripwire */
		temp = readl(&udc->op_regs->usbcmd);
		writel(temp | USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);

		/* Copy the setup packet to local buffer */
		memcpy(buffer_ptr, (u8 *) dqh->setup_buffer, 8);
	} while (!(readl(&udc->op_regs->usbcmd) & USBCMD_SETUP_TRIPWIRE_SET));

	/* Clear Setup Tripwire */
	temp = readl(&udc->op_regs->usbcmd);
	writel(temp & ~USBCMD_SETUP_TRIPWIRE_SET, &udc->op_regs->usbcmd);
}
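
/*
 * Like the ATDTW tripwire in queue_dtd(), the SUTW loop above makes the
 * 8-byte setup copy atomic with respect to the hardware: if a new SETUP
 * packet lands in the dQH while memcpy() runs, the controller clears the
 * tripwire and the copy is redone, so the driver never acts on a packet
 * that is half old and half new.
 */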

static void irq_process_tr_complete(struct mv_udc *udc)
{
	u32 tmp, bit_pos;
	int i, ep_num = 0, direction = 0;
	struct mv_ep	*curr_ep;
	struct mv_req *curr_req, *temp_req;
	int status;

	/*
	 * We use separate loops for ENDPTSETUPSTAT and ENDPTCOMPLETE
	 * because the setup packets are to be read ASAP
	 */

	/* Process all Setup packet received interrupts */
	tmp = readl(&udc->op_regs->epsetupstat);

	if (tmp) {
		for (i = 0; i < udc->max_eps; i++) {
			if (tmp & (1 << i)) {
				get_setup_data(udc, i,
					(u8 *)(&udc->local_setup_buff));
				handle_setup_packet(udc, i,
					&udc->local_setup_buff);
			}
		}
	}

	/* Don't clear the endpoint setup status register here.
	 * It is cleared as a setup packet is read out of the buffer
	 */

	/* Process non-setup transaction complete interrupts */
	tmp = readl(&udc->op_regs->epcomplete);

	if (!tmp)
		return;

	writel(tmp, &udc->op_regs->epcomplete);

	for (i = 0; i < udc->max_eps * 2; i++) {
		ep_num = i >> 1;
		direction = i % 2;

		bit_pos = 1 << (ep_num + 16 * direction);

		if (!(bit_pos & tmp))
			continue;

		if (i == 1)
			curr_ep = &udc->eps[0];
		else
			curr_ep = &udc->eps[i];
		/* process the req queue until an incomplete request */
		list_for_each_entry_safe(curr_req, temp_req,
			&curr_ep->queue, queue) {
			status = process_ep_req(udc, i, curr_req);
			if (status)
				break;

			/* write back status to req */
			curr_req->req.status = status;

			/* ep0 request completion */
			if (ep_num == 0) {
				ep0_req_complete(udc, curr_ep, curr_req);
				break;
			} else {
				done(curr_ep, curr_req, status);
			}
		}
	}
}

static void irq_process_reset(struct mv_udc *udc)
{
	u32 tmp;
	unsigned int loops;

	udc->ep0_dir = EP_DIR_OUT;
	udc->ep0_state = WAIT_FOR_SETUP;
	udc->remote_wakeup = 0;		/* default to 0 on reset */

	/* The device address occupies bits 25-31; reset it to zero */
1870 	tmp = readl(&udc->op_regs->deviceaddr);
1871 	tmp &= ~(USB_DEVICE_ADDRESS_MASK);
1872 	writel(tmp, &udc->op_regs->deviceaddr);
1873 
1874 	/* Clear all the setup token semaphores */
1875 	tmp = readl(&udc->op_regs->epsetupstat);
1876 	writel(tmp, &udc->op_regs->epsetupstat);
1877 
1878 	/* Clear all the endpoint complete status bits */
1879 	tmp = readl(&udc->op_regs->epcomplete);
1880 	writel(tmp, &udc->op_regs->epcomplete);
1881 
1882 	/* wait until all endptprime bits cleared */
1883 	loops = LOOPS(PRIME_TIMEOUT);
1884 	while (readl(&udc->op_regs->epprime) & 0xFFFFFFFF) {
1885 		if (loops == 0) {
1886 			dev_err(&udc->dev->dev,
1887 				"Timeout for ENDPTPRIME = 0x%x\n",
1888 				readl(&udc->op_regs->epprime));
1889 			break;
1890 		}
1891 		loops--;
1892 		udelay(LOOPS_USEC);
1893 	}
1894 
1895 	/* Write 1s to the Flush register */
1896 	writel((u32)~0, &udc->op_regs->epflush);
1897 
1898 	if (readl(&udc->op_regs->portsc[0]) & PORTSCX_PORT_RESET) {
1899 		dev_info(&udc->dev->dev, "usb bus reset\n");
1900 		udc->usb_state = USB_STATE_DEFAULT;
1901 		/* reset all the queues, stop all USB activities */
1902 		gadget_reset(udc, udc->driver);
1903 	} else {
1904 		dev_info(&udc->dev->dev, "USB reset portsc 0x%x\n",
1905 			readl(&udc->op_regs->portsc[0]));
1906 
1907 		/*
1908 		 * The port reset has already completed, so re-initialize
1909 		 * the controller from scratch.
1910 		 */
1911 		udc_reset(udc);
1912 
1913 		/* reset all the queues, stop all USB activities */
1914 		stop_activity(udc, udc->driver);
1915 
1916 		/* reset ep0 dQH and endptctrl */
1917 		ep0_reset(udc);
1918 
1919 		/* enable interrupt and set controller to run state */
1920 		udc_start(udc);
1921 
1922 		udc->usb_state = USB_STATE_ATTACHED;
1923 	}
1924 }
1925 
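/*
 * Illustrative sketch: the LOOPS()/udelay() busy-wait above (and in the
 * other timeout loops in this file) amounts to a generic "poll until the
 * masked bits clear" helper; wait_bits_clear() is hypothetical, not a
 * kernel API.
 */
#if 0
static int wait_bits_clear(void __iomem *reg, u32 mask, unsigned int timeout)
{
	unsigned int loops = LOOPS(timeout);

	while (readl(reg) & mask) {
		if (loops == 0)
			return -ETIMEDOUT;
		loops--;
		udelay(LOOPS_USEC);
	}

	return 0;
}

/* e.g. wait_bits_clear(&udc->op_regs->epprime, ~0U, PRIME_TIMEOUT); */
#endif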
1926 static void handle_bus_resume(struct mv_udc *udc)
1927 {
1928 	udc->usb_state = udc->resume_state;
1929 	udc->resume_state = 0;
1930 
1931 	/* report resume to the driver */
1932 	if (udc->driver) {
1933 		if (udc->driver->resume) {
1934 			spin_unlock(&udc->lock);
1935 			udc->driver->resume(&udc->gadget);
1936 			spin_lock(&udc->lock);
1937 		}
1938 	}
1939 }
1940 
1941 static void irq_process_suspend(struct mv_udc *udc)
1942 {
1943 	udc->resume_state = udc->usb_state;
1944 	udc->usb_state = USB_STATE_SUSPENDED;
1945 
1946 	if (udc->driver->suspend) {
1947 		spin_unlock(&udc->lock);
1948 		udc->driver->suspend(&udc->gadget);
1949 		spin_lock(&udc->lock);
1950 	}
1951 }
1952 
1953 static void irq_process_port_change(struct mv_udc *udc)
1954 {
1955 	u32 portsc;
1956 
1957 	portsc = readl(&udc->op_regs->portsc[0]);
1958 	if (!(portsc & PORTSCX_PORT_RESET)) {
1959 		/* Get the speed */
1960 		u32 speed = portsc & PORTSCX_PORT_SPEED_MASK;
1961 		switch (speed) {
1962 		case PORTSCX_PORT_SPEED_HIGH:
1963 			udc->gadget.speed = USB_SPEED_HIGH;
1964 			break;
1965 		case PORTSCX_PORT_SPEED_FULL:
1966 			udc->gadget.speed = USB_SPEED_FULL;
1967 			break;
1968 		case PORTSCX_PORT_SPEED_LOW:
1969 			udc->gadget.speed = USB_SPEED_LOW;
1970 			break;
1971 		default:
1972 			udc->gadget.speed = USB_SPEED_UNKNOWN;
1973 			break;
1974 		}
1975 	}
1976 
1977 	if (portsc & PORTSCX_PORT_SUSPEND) {
1978 		udc->resume_state = udc->usb_state;
1979 		udc->usb_state = USB_STATE_SUSPENDED;
1980 		if (udc->driver->suspend) {
1981 			spin_unlock(&udc->lock);
1982 			udc->driver->suspend(&udc->gadget);
1983 			spin_lock(&udc->lock);
1984 		}
1985 	}
1986 
1987 	if (!(portsc & PORTSCX_PORT_SUSPEND)
1988 		&& udc->usb_state == USB_STATE_SUSPENDED) {
1989 		handle_bus_resume(udc);
1990 	}
1991 
1992 	if (!udc->resume_state)
1993 		udc->usb_state = USB_STATE_DEFAULT;
1994 }
1995 
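/*
 * Illustrative sketch: the switch in irq_process_port_change() maps the
 * PORTSC speed field onto the gadget speed enum; condensed here into a
 * hypothetical helper for clarity.
 */
#if 0
static enum usb_device_speed portsc_to_speed(u32 portsc)
{
	switch (portsc & PORTSCX_PORT_SPEED_MASK) {
	case PORTSCX_PORT_SPEED_HIGH:
		return USB_SPEED_HIGH;
	case PORTSCX_PORT_SPEED_FULL:
		return USB_SPEED_FULL;
	case PORTSCX_PORT_SPEED_LOW:
		return USB_SPEED_LOW;
	default:
		return USB_SPEED_UNKNOWN;
	}
}
#endif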
1996 static void irq_process_error(struct mv_udc *udc)
1997 {
1998 	/* Increment the error count */
1999 	udc->errors++;
2000 }
2001 
2002 static irqreturn_t mv_udc_irq(int irq, void *dev)
2003 {
2004 	struct mv_udc *udc = (struct mv_udc *)dev;
2005 	u32 status, intr;
2006 
2007 	/* ignore interrupts while the controller is stopped */
2008 	if (udc->stopped)
2009 		return IRQ_NONE;
2010 
2011 	spin_lock(&udc->lock);
2012 
2013 	status = readl(&udc->op_regs->usbsts);
2014 	intr = readl(&udc->op_regs->usbintr);
2015 	status &= intr;
2016 
2017 	if (status == 0) {
2018 		spin_unlock(&udc->lock);
2019 		return IRQ_NONE;
2020 	}
2021 
2022 	/* acknowledge all the pending interrupts */
2023 	writel(status, &udc->op_regs->usbsts);
2024 
2025 	if (status & USBSTS_ERR)
2026 		irq_process_error(udc);
2027 
2028 	if (status & USBSTS_RESET)
2029 		irq_process_reset(udc);
2030 
2031 	if (status & USBSTS_PORT_CHANGE)
2032 		irq_process_port_change(udc);
2033 
2034 	if (status & USBSTS_INT)
2035 		irq_process_tr_complete(udc);
2036 
2037 	if (status & USBSTS_SUSPEND)
2038 		irq_process_suspend(udc);
2039 
2040 	spin_unlock(&udc->lock);
2041 
2042 	return IRQ_HANDLED;
2043 }
2044 
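/*
 * Illustrative sketch: mv_udc_irq() above masks USBSTS with USBINTR so
 * that only enabled, pending interrupts are serviced, and acknowledges
 * them by writing the mask back (USBSTS is write-1-to-clear).
 * ack_pending() is hypothetical.
 */
#if 0
static u32 ack_pending(struct mv_udc *udc)
{
	u32 pending = readl(&udc->op_regs->usbsts) &
		      readl(&udc->op_regs->usbintr);

	if (pending)
		writel(pending, &udc->op_regs->usbsts);

	return pending;
}
#endif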
2045 static irqreturn_t mv_udc_vbus_irq(int irq, void *dev)
2046 {
2047 	struct mv_udc *udc = (struct mv_udc *)dev;
2048 
2049 	/* polling VBUS and initializing the phy may take too long */
2050 	if (udc->qwork)
2051 		queue_work(udc->qwork, &udc->vbus_work);
2052 
2053 	return IRQ_HANDLED;
2054 }
2055 
2056 static void mv_udc_vbus_work(struct work_struct *work)
2057 {
2058 	struct mv_udc *udc;
2059 	unsigned int vbus;
2060 
2061 	udc = container_of(work, struct mv_udc, vbus_work);
2062 	if (!udc->pdata->vbus)
2063 		return;
2064 
2065 	vbus = udc->pdata->vbus->poll();
2066 	dev_info(&udc->dev->dev, "vbus is %d\n", vbus);
2067 
2068 	if (vbus == VBUS_HIGH)
2069 		mv_udc_vbus_session(&udc->gadget, 1);
2070 	else if (vbus == VBUS_LOW)
2071 		mv_udc_vbus_session(&udc->gadget, 0);
2072 }
2073 
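/*
 * Illustrative sketch of a board-level VBUS poll callback as consumed by
 * mv_udc_vbus_work() above; the GPIO wiring (BOARD_VBUS_GPIO and the
 * legacy gpio_get_value() call from <linux/gpio.h>) is an assumption,
 * not part of this driver.
 */
#if 0
static int board_vbus_poll(void)
{
	/* VBUS_HIGH/VBUS_LOW come from <linux/platform_data/mv_usb.h> */
	return gpio_get_value(BOARD_VBUS_GPIO) ? VBUS_HIGH : VBUS_LOW;
}
#endif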
2074 /* release device structure */
2075 static void gadget_release(struct device *_dev)
2076 {
2077 	struct mv_udc *udc;
2078 
2079 	udc = dev_get_drvdata(_dev);
2080 
2081 	complete(udc->done);
2082 }
2083 
2084 static int mv_udc_remove(struct platform_device *pdev)
2085 {
2086 	struct mv_udc *udc;
2087 
2088 	udc = platform_get_drvdata(pdev);
2089 
2090 	usb_del_gadget_udc(&udc->gadget);
2091 
2092 	if (udc->qwork) {
2093 		flush_workqueue(udc->qwork);
2094 		destroy_workqueue(udc->qwork);
2095 	}
2096 
2097 	/* free memory allocated in probe */
2098 	dma_pool_destroy(udc->dtd_pool);
2099 
2100 	if (udc->ep_dqh)
2101 		dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2102 			udc->ep_dqh, udc->ep_dqh_dma);
2103 
2104 	mv_udc_disable(udc);
2105 
2106 	/* free dev, wait for release() to finish */
2107 	wait_for_completion(udc->done);
2108 
2109 	return 0;
2110 }
2111 
2112 static int mv_udc_probe(struct platform_device *pdev)
2113 {
2114 	struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
2115 	struct mv_udc *udc;
2116 	int retval = 0;
2117 	struct resource *r;
2118 	size_t size;
2119 
2120 	if (pdata == NULL) {
2121 		dev_err(&pdev->dev, "missing platform_data\n");
2122 		return -ENODEV;
2123 	}
2124 
2125 	udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
2126 	if (udc == NULL)
2127 		return -ENOMEM;
2128 
2129 	udc->done = &release_done;
2130 	udc->pdata = dev_get_platdata(&pdev->dev);
2131 	spin_lock_init(&udc->lock);
2132 
2133 	udc->dev = pdev;
2134 
2135 	if (pdata->mode == MV_USB_MODE_OTG) {
2136 		udc->transceiver = devm_usb_get_phy(&pdev->dev,
2137 					USB_PHY_TYPE_USB2);
2138 		if (IS_ERR(udc->transceiver)) {
2139 			retval = PTR_ERR(udc->transceiver);
2140 
2141 			if (retval == -ENXIO)
2142 				return retval;
2143 
2144 			udc->transceiver = NULL;
2145 			return -EPROBE_DEFER;
2146 		}
2147 	}
2148 
2149 	/* the udc has only one sysclk */
2150 	udc->clk = devm_clk_get(&pdev->dev, NULL);
2151 	if (IS_ERR(udc->clk))
2152 		return PTR_ERR(udc->clk);
2153 
2154 	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "capregs");
2155 	if (r == NULL) {
2156 		dev_err(&pdev->dev, "no I/O memory resource defined\n");
2157 		return -ENODEV;
2158 	}
2159 
2160 	udc->cap_regs = (struct mv_cap_regs __iomem *)
2161 		devm_ioremap(&pdev->dev, r->start, resource_size(r));
2162 	if (udc->cap_regs == NULL) {
2163 		dev_err(&pdev->dev, "failed to map I/O memory\n");
2164 		return -EBUSY;
2165 	}
2166 
2167 	r = platform_get_resource_byname(udc->dev, IORESOURCE_MEM, "phyregs");
2168 	if (r == NULL) {
2169 		dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
2170 		return -ENODEV;
2171 	}
2172 
2173 	udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2174 	if (udc->phy_regs == NULL) {
2175 		dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2176 		return -EBUSY;
2177 	}
2178 
2179 	/* we will access controller registers, so enable the clk */
2180 	retval = mv_udc_enable_internal(udc);
2181 	if (retval)
2182 		return retval;
2183 
2184 	udc->op_regs =
2185 		(struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
2186 		+ (readl(&udc->cap_regs->caplength_hciversion)
2187 			& CAPLENGTH_MASK));
2188 	udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
2189 
2190 	/*
2191 	 * Some platforms download the boot image over USB and may leave the
2192 	 * usb gadget connected when the kernel starts, so stop the udc first.
2193 	 */
2194 	udc_stop(udc);
2195 	writel(0xFFFFFFFF, &udc->op_regs->usbsts);
2196 
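	/*
	 * Allocate one dQH per endpoint and direction, rounded up to a
	 * multiple of DQH_ALIGNMENT (a power of two) with the standard
	 * add-then-mask align-up idiom, cf. the kernel's ALIGN() macro.
	 */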
2197 	size = udc->max_eps * sizeof(struct mv_dqh) * 2;
2198 	size = (size + DQH_ALIGNMENT - 1) & ~(DQH_ALIGNMENT - 1);
2199 	udc->ep_dqh = dma_alloc_coherent(&pdev->dev, size,
2200 					&udc->ep_dqh_dma, GFP_KERNEL);
2201 
2202 	if (udc->ep_dqh == NULL) {
2203 		dev_err(&pdev->dev, "allocate dQH memory failed\n");
2204 		retval = -ENOMEM;
2205 		goto err_disable_clock;
2206 	}
2207 	udc->ep_dqh_size = size;
2208 
2209 	/* create dTD dma_pool resource */
2210 	udc->dtd_pool = dma_pool_create("mv_dtd",
2211 			&pdev->dev,
2212 			sizeof(struct mv_dtd),
2213 			DTD_ALIGNMENT,
2214 			DMA_BOUNDARY);
2215 
2216 	if (!udc->dtd_pool) {
2217 		retval = -ENOMEM;
2218 		goto err_free_dma;
2219 	}
2220 
2221 	size = udc->max_eps * sizeof(struct mv_ep) * 2;
2222 	udc->eps = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
2223 	if (udc->eps == NULL) {
2224 		retval = -ENOMEM;
2225 		goto err_destroy_dma;
2226 	}
2227 
2228 	/* initialize ep0 status request structure */
2229 	udc->status_req = devm_kzalloc(&pdev->dev, sizeof(struct mv_req),
2230 					GFP_KERNEL);
2231 	if (!udc->status_req) {
2232 		retval = -ENOMEM;
2233 		goto err_destroy_dma;
2234 	}
2235 	INIT_LIST_HEAD(&udc->status_req->queue);
2236 
2237 	/* allocate a small buffer so the ep0 status request has a valid address */
2238 	udc->status_req->req.buf = devm_kzalloc(&pdev->dev, 8, GFP_KERNEL);
	if (!udc->status_req->req.buf) {
		retval = -ENOMEM;
		goto err_destroy_dma;
	}
2239 	udc->status_req->req.dma = DMA_ADDR_INVALID;
2240 
2241 	udc->resume_state = USB_STATE_NOTATTACHED;
2242 	udc->usb_state = USB_STATE_POWERED;
2243 	udc->ep0_dir = EP_DIR_OUT;
2244 	udc->remote_wakeup = 0;
2245 
2246 	r = platform_get_resource(udc->dev, IORESOURCE_IRQ, 0);
2247 	if (r == NULL) {
2248 		dev_err(&pdev->dev, "no IRQ resource defined\n");
2249 		retval = -ENODEV;
2250 		goto err_destroy_dma;
2251 	}
2252 	udc->irq = r->start;
2253 	if (devm_request_irq(&pdev->dev, udc->irq, mv_udc_irq,
2254 		IRQF_SHARED, driver_name, udc)) {
2255 		dev_err(&pdev->dev, "Request irq %d for UDC failed\n",
2256 			udc->irq);
2257 		retval = -ENODEV;
2258 		goto err_destroy_dma;
2259 	}
2260 
2261 	/* initialize gadget structure */
2262 	udc->gadget.ops = &mv_ops;	/* usb_gadget_ops */
2263 	udc->gadget.ep0 = &udc->eps[0].ep;	/* gadget ep0 */
2264 	INIT_LIST_HEAD(&udc->gadget.ep_list);	/* ep_list */
2265 	udc->gadget.speed = USB_SPEED_UNKNOWN;	/* speed */
2266 	udc->gadget.max_speed = USB_SPEED_HIGH;	/* support dual speed */
2267 
2268 	/* the "gadget" abstracts/virtualizes the controller */
2269 	udc->gadget.name = driver_name;		/* gadget name */
2270 
2271 	eps_init(udc);
2272 
2273 	/* VBUS detect: we can disable/enable the clock on demand */
2274 	if (udc->transceiver)
2275 		udc->clock_gating = 1;
2276 	else if (pdata->vbus) {
2277 		udc->clock_gating = 1;
2278 		retval = devm_request_threaded_irq(&pdev->dev,
2279 				pdata->vbus->irq, NULL,
2280 				mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
2281 		if (retval) {
2282 			dev_info(&pdev->dev,
2283 				"Cannot request irq for VBUS, "
2284 				"disabling clock gating\n");
2285 			udc->clock_gating = 0;
2286 		}
2287 
2288 		udc->qwork = create_singlethread_workqueue("mv_udc_queue");
2289 		if (!udc->qwork) {
2290 			dev_err(&pdev->dev, "cannot create workqueue\n");
2291 			retval = -ENOMEM;
2292 			goto err_destroy_dma;
2293 		}
2294 
2295 		INIT_WORK(&udc->vbus_work, mv_udc_vbus_work);
2296 	}
2297 
2298 	/*
2299 	 * When clock gating is supported, we can disable the clk and phy.
2300 	 * If not, VBUS detection is not supported either, so we have to
2301 	 * keep vbus active all the time to let the controller work.
2302 	 */
2303 	if (udc->clock_gating)
2304 		mv_udc_disable_internal(udc);
2305 	else
2306 		udc->vbus_active = 1;
2307 
2308 	retval = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
2309 			gadget_release);
2310 	if (retval)
2311 		goto err_create_workqueue;
2312 
2313 	platform_set_drvdata(pdev, udc);
2314 	dev_info(&pdev->dev, "successful probe UDC device %s clock gating.\n",
2315 		udc->clock_gating ? "with" : "without");
2316 
2317 	return 0;
2318 
2319 err_create_workqueue:
2320 	destroy_workqueue(udc->qwork);
2321 err_destroy_dma:
2322 	dma_pool_destroy(udc->dtd_pool);
2323 err_free_dma:
2324 	dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
2325 			udc->ep_dqh, udc->ep_dqh_dma);
2326 err_disable_clock:
2327 	mv_udc_disable_internal(udc);
2328 
2329 	return retval;
2330 }
2331 
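/*
 * Illustrative sketch of how a board file might wire this driver up; the
 * "mv-udc" name matches udc_driver below and MV_USB_MODE_UDC is assumed
 * from <linux/platform_data/mv_usb.h>, while the board_* names and the
 * omitted resources are assumptions.
 */
#if 0
static struct mv_usb_platform_data board_udc_pdata = {
	.mode	= MV_USB_MODE_UDC,
	/* .vbus = &board_vbus_addon, enables VBUS detect + clock gating */
};

static struct platform_device board_udc_device = {
	.name	= "mv-udc",
	.id	= -1,
	.dev	= {
		.platform_data	= &board_udc_pdata,
	},
	/* "capregs"/"phyregs" MEM resources and the IRQ are omitted here */
};
#endif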
2332 #ifdef CONFIG_PM
2333 static int mv_udc_suspend(struct device *dev)
2334 {
2335 	struct mv_udc *udc;
2336 
2337 	udc = dev_get_drvdata(dev);
2338 
2339 	/* if OTG is enabled, the following will be done in the OTG driver */
2340 	if (udc->transceiver)
2341 		return 0;
2342 
2343 	if (udc->pdata->vbus && udc->pdata->vbus->poll)
2344 		if (udc->pdata->vbus->poll() == VBUS_HIGH) {
2345 			dev_info(&udc->dev->dev, "USB cable is connected!\n");
2346 			return -EAGAIN;
2347 		}
2348 
2349 	/*
2350 	 * The udc can suspend only when the cable is unplugged, so
2351 	 * the clock_gating == 1 case needs no special handling here.
2352 	 */
2353 	if (!udc->clock_gating) {
2354 		udc_stop(udc);
2355 
2356 		spin_lock_irq(&udc->lock);
2357 		/* stop all usb activities */
2358 		stop_activity(udc, udc->driver);
2359 		spin_unlock_irq(&udc->lock);
2360 
2361 		mv_udc_disable_internal(udc);
2362 	}
2363 
2364 	return 0;
2365 }
2366 
2367 static int mv_udc_resume(struct device *dev)
2368 {
2369 	struct mv_udc *udc;
2370 	int retval;
2371 
2372 	udc = dev_get_drvdata(dev);
2373 
2374 	/* if OTG is enabled, the following will be done in the OTG driver */
2375 	if (udc->transceiver)
2376 		return 0;
2377 
2378 	if (!udc->clock_gating) {
2379 		retval = mv_udc_enable_internal(udc);
2380 		if (retval)
2381 			return retval;
2382 
2383 		if (udc->driver && udc->softconnect) {
2384 			udc_reset(udc);
2385 			ep0_reset(udc);
2386 			udc_start(udc);
2387 		}
2388 	}
2389 
2390 	return 0;
2391 }
2392 
2393 static const struct dev_pm_ops mv_udc_pm_ops = {
2394 	.suspend	= mv_udc_suspend,
2395 	.resume		= mv_udc_resume,
2396 };
2397 #endif
2398 
2399 static void mv_udc_shutdown(struct platform_device *pdev)
2400 {
2401 	struct mv_udc *udc;
2402 	u32 mode;
2403 
2404 	udc = platform_get_drvdata(pdev);
2405 	/* reset controller mode to IDLE */
2406 	mv_udc_enable(udc);
2407 	mode = readl(&udc->op_regs->usbmode);
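	/* bits 1:0 of USBMODE (CM) select the controller mode; 00b is idle */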
2408 	mode &= ~3;
2409 	writel(mode, &udc->op_regs->usbmode);
2410 	mv_udc_disable(udc);
2411 }
2412 
2413 static struct platform_driver udc_driver = {
2414 	.probe		= mv_udc_probe,
2415 	.remove		= mv_udc_remove,
2416 	.shutdown	= mv_udc_shutdown,
2417 	.driver		= {
2418 		.name	= "mv-udc",
2419 #ifdef CONFIG_PM
2420 		.pm	= &mv_udc_pm_ops,
2421 #endif
2422 	},
2423 };
2424 
2425 module_platform_driver(udc_driver);
2426 MODULE_ALIAS("platform:mv-udc");
2427 MODULE_DESCRIPTION(DRIVER_DESC);
2428 MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
2429 MODULE_LICENSE("GPL");
2430