1 /*
2 * MUSB OTG driver core code
3 *
4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 /*
36 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
37 *
38 * This consists of a Host Controller Driver (HCD) and a peripheral
39 * controller driver implementing the "Gadget" API; OTG support is
40 * in the works. These are normal Linux-USB controller drivers which
41 * use IRQs and have no dedicated thread.
42 *
43 * This version of the driver has only been used with products from
44 * Texas Instruments. Those products integrate the Inventra logic
45 * with other DMA, IRQ, and bus modules, as well as other logic that
46 * needs to be reflected in this driver.
47 *
48 *
49 * NOTE: the original Mentor code here was pretty much a collection
50 * of mechanisms that don't seem to have been fully integrated/working
51 * for *any* Linux kernel version. This version aims at Linux 2.6.now,
52 * Key open issues include:
53 *
54 * - Lack of host-side transaction scheduling, for all transfer types.
55 * The hardware doesn't do it; instead, software must.
56 *
57 * This is not an issue for OTG devices that don't support external
58 * hubs, but for more "normal" USB hosts it's a user issue that the
59 * "multipoint" support doesn't scale in the expected ways. That
60 * includes DaVinci EVM in a common non-OTG mode.
61 *
62 * * Control and bulk use dedicated endpoints, and there's as
63 * yet no mechanism to either (a) reclaim the hardware when
64 * peripherals are NAKing, which gets complicated with bulk
65 * endpoints, or (b) use more than a single bulk endpoint in
66 * each direction.
67 *
68 * RESULT: one device may be perceived as blocking another one.
69 *
70 * * Interrupt and isochronous will dynamically allocate endpoint
71 * hardware, but (a) there's no record keeping for bandwidth;
72 * (b) in the common case that few endpoints are available, there
73 * is no mechanism to reuse endpoints to talk to multiple devices.
74 *
75 * RESULT: At one extreme, bandwidth can be overcommitted in
76 * some hardware configurations, no faults will be reported.
77 * At the other extreme, the bandwidth capabilities which do
78 * exist tend to be severely undercommitted. You can't yet hook
79 * up both a keyboard and a mouse to an external USB hub.
80 */
81
82 /*
83 * This gets many kinds of configuration information:
84 * - Kconfig for everything user-configurable
85 * - platform_device for addressing, irq, and platform_data
86 * - platform_data is mostly for board-specific information
 * (plus recently, SOC or family details)
88 *
89 * Most of the conditional compilation will (someday) vanish.
90 */
91
92 #include <linux/module.h>
93 #include <linux/kernel.h>
94 #include <linux/sched.h>
95 #include <linux/slab.h>
96 #include <linux/list.h>
97 #include <linux/kobject.h>
98 #include <linux/prefetch.h>
99 #include <linux/platform_device.h>
100 #include <linux/io.h>
101 #include <linux/dma-mapping.h>
102 #include <linux/usb.h>
103 #include <linux/usb/of.h>
104
105 #include "musb_core.h"
106 #include "musb_trace.h"
107
/* T(a_wait_bcon): board-tunable, but never below the OTG-spec minimum */
#define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)


#define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
#define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"

#define MUSB_VERSION "6.0"

#define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION

/* Name shared with the platform glue drivers that bind to "musb-hdrc" */
#define MUSB_DRIVER_NAME "musb-hdrc"
const char musb_driver_name[] = MUSB_DRIVER_NAME;

MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
125
126
127 /*-------------------------------------------------------------------------*/
128
/* Recover the struct musb instance stashed as driver data on @dev. */
static inline struct musb *dev_to_musb(struct device *dev)
{
	struct musb *musb = dev_get_drvdata(dev);

	return musb;
}
133
musb_get_mode(struct device * dev)134 enum musb_mode musb_get_mode(struct device *dev)
135 {
136 enum usb_dr_mode mode;
137
138 mode = usb_get_dr_mode(dev);
139 switch (mode) {
140 case USB_DR_MODE_HOST:
141 return MUSB_HOST;
142 case USB_DR_MODE_PERIPHERAL:
143 return MUSB_PERIPHERAL;
144 case USB_DR_MODE_OTG:
145 case USB_DR_MODE_UNKNOWN:
146 default:
147 return MUSB_OTG;
148 }
149 }
150 EXPORT_SYMBOL_GPL(musb_get_mode);
151
152 /*-------------------------------------------------------------------------*/
153
154 #ifndef CONFIG_BLACKFIN
/*
 * Read a ULPI PHY register through the MUSB ULPI register-access window.
 *
 * @phy: transceiver whose io_priv holds the MUSB register base
 * @reg: ULPI register address (only the low 8 bits are used)
 *
 * Returns the register value (0..255) on success, or -ETIMEDOUT if the
 * controller never signals completion within ~10000 polls.
 */
static int musb_ulpi_read(struct usb_phy *phy, u32 reg)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r;
	u8	power;
	int	ret;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
	 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
	 */

	/* start the read request */
	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
			MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);

	/* busy-wait for completion; bounded so a dead PHY can't hang us */
	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}

	}
	/* acknowledge completion by clearing the CMPLT bit */
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

	ret = musb_readb(addr, MUSB_ULPI_REG_DATA);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
198
/*
 * Write a ULPI PHY register through the MUSB ULPI register-access window.
 *
 * @phy: transceiver whose io_priv holds the MUSB register base
 * @val: value to write (only the low 8 bits are used)
 * @reg: ULPI register address (only the low 8 bits are used)
 *
 * Returns 0 on success, or -ETIMEDOUT if the controller never signals
 * completion within ~10000 polls.
 */
static int musb_ulpi_write(struct usb_phy *phy, u32 val, u32 reg)
{
	void __iomem *addr = phy->io_priv;
	int	i = 0;
	u8	r = 0;
	u8	power;
	int	ret = 0;

	pm_runtime_get_sync(phy->io_dev);

	/* Make sure the transceiver is not in low power mode */
	power = musb_readb(addr, MUSB_POWER);
	power &= ~MUSB_POWER_SUSPENDM;
	musb_writeb(addr, MUSB_POWER, power);

	/* latch address and data, then kick off the write request */
	musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)reg);
	musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)val);
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);

	/* busy-wait for completion; bounded so a dead PHY can't hang us */
	while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
				& MUSB_ULPI_REG_CMPLT)) {
		i++;
		if (i == 10000) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* acknowledge completion by clearing the CMPLT bit */
	r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
	r &= ~MUSB_ULPI_REG_CMPLT;
	musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);

out:
	pm_runtime_put(phy->io_dev);

	return ret;
}
236 #else
237 #define musb_ulpi_read NULL
238 #define musb_ulpi_write NULL
239 #endif
240
/* PHY register-access ops handed to the generic usb_phy/ULPI layer */
static struct usb_phy_io_ops musb_ulpi_access = {
	.read = musb_ulpi_read,
	.write = musb_ulpi_write,
};
245
246 /*-------------------------------------------------------------------------*/
247
musb_default_fifo_offset(u8 epnum)248 static u32 musb_default_fifo_offset(u8 epnum)
249 {
250 return 0x20 + (epnum * 4);
251 }
252
/* "flat" mapping: each endpoint has its own i/o address */
static void musb_flat_ep_select(void __iomem *mbase, u8 epnum)
{
	/* intentionally empty: flat register maps need no bank select */
}
257
musb_flat_ep_offset(u8 epnum,u16 offset)258 static u32 musb_flat_ep_offset(u8 epnum, u16 offset)
259 {
260 return 0x100 + (0x10 * epnum) + offset;
261 }
262
/* "indexed" mapping: INDEX register controls register bank select */
static void musb_indexed_ep_select(void __iomem *mbase, u8 epnum)
{
	/* all per-endpoint registers are then accessed via the 0x10 window */
	musb_writeb(mbase, MUSB_INDEX, epnum);
}
268
musb_indexed_ep_offset(u8 epnum,u16 offset)269 static u32 musb_indexed_ep_offset(u8 epnum, u16 offset)
270 {
271 return 0x10 + offset;
272 }
273
musb_default_busctl_offset(u8 epnum,u16 offset)274 static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
275 {
276 return 0x80 + (0x08 * epnum) + offset;
277 }
278
musb_default_readb(const void __iomem * addr,unsigned offset)279 static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
280 {
281 u8 data = __raw_readb(addr + offset);
282
283 trace_musb_readb(__builtin_return_address(0), addr, offset, data);
284 return data;
285 }
286
/* Default byte-wide register write, with a trace event per access. */
static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
{
	void __iomem *reg = addr + offset;

	trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
	__raw_writeb(data, reg);
}
292
musb_default_readw(const void __iomem * addr,unsigned offset)293 static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
294 {
295 u16 data = __raw_readw(addr + offset);
296
297 trace_musb_readw(__builtin_return_address(0), addr, offset, data);
298 return data;
299 }
300
/* Default 16-bit register write, with a trace event per access. */
static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
{
	void __iomem *reg = addr + offset;

	trace_musb_writew(__builtin_return_address(0), addr, offset, data);
	__raw_writew(data, reg);
}
306
musb_default_readl(const void __iomem * addr,unsigned offset)307 static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
308 {
309 u32 data = __raw_readl(addr + offset);
310
311 trace_musb_readl(__builtin_return_address(0), addr, offset, data);
312 return data;
313 }
314
/* Default 32-bit register write, with a trace event per access. */
static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
{
	void __iomem *reg = addr + offset;

	trace_musb_writel(__builtin_return_address(0), addr, offset, data);
	__raw_writel(data, reg);
}
320
/*
 * Load an endpoint's FIFO
 *
 * Copies @len bytes from @src into @hw_ep's FIFO register, using the
 * widest MMIO string accesses the source buffer's alignment allows
 * (32-bit, then 16-bit, then single trailing byte).
 */
static void musb_default_write_fifo(struct musb_hw_ep *hw_ep, u16 len,
				    const u8 *src)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	prefetch((u8 *)src);

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'T', hw_ep->epnum, fifo, len, src);

	/* we can't assume unaligned reads work */
	if (likely((0x01 & (unsigned long) src) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned source address */
		if ((0x02 & (unsigned long) src) == 0) {
			if (len >= 4) {
				iowrite32_rep(fifo, src + index, len >> 2);
				index += len & ~0x03;
			}
			/* trailing halfword, if any */
			if (len & 0x02) {
				__raw_writew(*(u16 *)&src[index], fifo);
				index += 2;
			}
		} else {
			/* halfword-aligned: 16-bit bursts only */
			if (len >= 2) {
				iowrite16_rep(fifo, src + index, len >> 1);
				index += len & ~0x01;
			}
		}
		/* trailing byte, if any */
		if (len & 0x01)
			__raw_writeb(src[index], fifo);
	} else  {
		/* byte aligned */
		iowrite8_rep(fifo, src, len);
	}
}
365
/*
 * Unload an endpoint's FIFO
 *
 * Copies @len bytes from @hw_ep's FIFO register into @dst, using the
 * widest MMIO string accesses the destination buffer's alignment allows
 * (32-bit, then 16-bit, then single trailing byte).
 */
static void musb_default_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	struct musb *musb = hw_ep->musb;
	void __iomem *fifo = hw_ep->fifo;

	if (unlikely(len == 0))
		return;

	dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
			'R', hw_ep->epnum, fifo, len, dst);

	/* we can't assume unaligned writes work */
	if (likely((0x01 & (unsigned long) dst) == 0)) {
		u16	index = 0;

		/* best case is 32bit-aligned destination address */
		if ((0x02 & (unsigned long) dst) == 0) {
			if (len >= 4) {
				ioread32_rep(fifo, dst, len >> 2);
				index = len & ~0x03;
			}
			/* trailing halfword, if any */
			if (len & 0x02) {
				*(u16 *)&dst[index] = __raw_readw(fifo);
				index += 2;
			}
		} else {
			/* halfword-aligned: 16-bit bursts only */
			if (len >= 2) {
				ioread16_rep(fifo, dst, len >> 1);
				index = len & ~0x01;
			}
		}
		/* trailing byte, if any */
		if (len & 0x01)
			dst[index] = __raw_readb(fifo);
	} else  {
		/* byte aligned */
		ioread8_rep(fifo, dst, len);
	}
}
407
/*
 * Old style IO functions
 *
 * Global register-accessor hooks.  They are assigned by the platform
 * glue / probe code (not visible in this file) — presumably to the
 * musb_default_* implementations above unless a platform overrides
 * them; confirm against the glue drivers.
 */
u8 (*musb_readb)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readb);

void (*musb_writeb)(void __iomem *addr, unsigned offset, u8 data);
EXPORT_SYMBOL_GPL(musb_writeb);

u16 (*musb_readw)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readw);

void (*musb_writew)(void __iomem *addr, unsigned offset, u16 data);
EXPORT_SYMBOL_GPL(musb_writew);

u32 (*musb_readl)(const void __iomem *addr, unsigned offset);
EXPORT_SYMBOL_GPL(musb_readl);

void (*musb_writel)(void __iomem *addr, unsigned offset, u32 data);
EXPORT_SYMBOL_GPL(musb_writel);

/* DMA controller factory hooks, likewise installed by the glue code */
#ifndef CONFIG_MUSB_PIO_ONLY
struct dma_controller *
	(*musb_dma_controller_create)(struct musb *musb, void __iomem *base);
EXPORT_SYMBOL(musb_dma_controller_create);

void (*musb_dma_controller_destroy)(struct dma_controller *c);
EXPORT_SYMBOL(musb_dma_controller_destroy);
#endif
437
438 /*
439 * New style IO functions
440 */
/* Unload @len bytes via the per-instance io.read_fifo hook. */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	hw_ep->musb->io.read_fifo(hw_ep, len, dst);
}
445
/* Load @len bytes via the per-instance io.write_fifo hook. */
void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
{
	hw_ep->musb->io.write_fifo(hw_ep, len, src);
}
450
451 /*-------------------------------------------------------------------------*/
452
/* for high speed test mode; see USB 2.0 spec 7.1.20 */
static const u8 musb_test_packet[53] = {
	/* implicit SYNC then DATA0 to start */

	/* JKJKJKJK x9 */
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* JJKKJJKK x8 */
	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
	/* JJJJKKKK x8 */
	0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
	/* JJJJJJJKKKKKKK x8 */
	0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	/* JJJJJJJK x8 */
	0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
	/* JKKKKKKK x10, JK */
	0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e

	/* implicit CRC16 then EOP to end */
};
472
musb_load_testpacket(struct musb * musb)473 void musb_load_testpacket(struct musb *musb)
474 {
475 void __iomem *regs = musb->endpoints[0].regs;
476
477 musb_ep_select(musb->mregs, 0);
478 musb_write_fifo(musb->control_ep,
479 sizeof(musb_test_packet), musb_test_packet);
480 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
481 }
482
483 /*-------------------------------------------------------------------------*/
484
/*
 * Handles OTG hnp timeouts, such as b_ase0_brst
 *
 * Timer callback; @data is the struct musb * cast to unsigned long.
 * Runs with musb->lock held (taken here) since it mutates OTG state.
 */
static void musb_otg_timer_func(unsigned long data)
{
	struct musb	*musb = (struct musb *)data;
	unsigned long	flags;

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->otg->state) {
	case OTG_STATE_B_WAIT_ACON:
		/* HNP host handover never happened: drop back to gadget */
		musb_dbg(musb,
			"HNP: b_wait_acon timeout; back to b_peripheral");
		musb_g_disconnect(musb);
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		musb->is_active = 0;
		break;
	case OTG_STATE_A_SUSPEND:
	case OTG_STATE_A_WAIT_BCON:
		/* no B-device showed up: turn VBUS off and wind down */
		musb_dbg(musb, "HNP: %s timeout",
			usb_otg_state_string(musb->xceiv->otg->state));
		musb_platform_set_vbus(musb, 0);
		musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
		break;
	default:
		musb_dbg(musb, "HNP: Unhandled mode %s",
			usb_otg_state_string(musb->xceiv->otg->state));
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
515
/*
 * Stops the HNP transition. Caller must take care of locking.
 */
void musb_hnp_stop(struct musb *musb)
{
	struct usb_hcd	*hcd = musb->hcd;
	void __iomem	*mbase = musb->mregs;
	u8	reg;

	musb_dbg(musb, "HNP: stop from %s",
			usb_otg_state_string(musb->xceiv->otg->state));

	switch (musb->xceiv->otg->state) {
	case OTG_STATE_A_PERIPHERAL:
		musb_g_disconnect(musb);
		musb_dbg(musb, "HNP: back to %s",
			usb_otg_state_string(musb->xceiv->otg->state));
		break;
	case OTG_STATE_B_HOST:
		musb_dbg(musb, "HNP: Disabling HR");
		if (hcd)
			hcd->self.is_b_host = 0;
		musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
		MUSB_DEV_MODE(musb);
		/* set SUSPENDM: suspend the bus while back in device mode */
		reg = musb_readb(mbase, MUSB_POWER);
		reg |= MUSB_POWER_SUSPENDM;
		musb_writeb(mbase, MUSB_POWER, reg);
		/* REVISIT: Start SESSION_REQUEST here? */
		break;
	default:
		musb_dbg(musb, "HNP: Stopping in unknown state %s",
			usb_otg_state_string(musb->xceiv->otg->state));
	}

	/*
	 * When returning to A state after HNP, avoid hub_port_rebounce(),
	 * which causes occasional OPT A "Did not receive reset after connect"
	 * errors.
	 */
	musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
}
557
558 static void musb_recover_from_babble(struct musb *musb);
559
560 /*
561 * Interrupt Service Routine to record USB "global" interrupts.
562 * Since these do not happen often and signify things of
563 * paramount importance, it seems OK to check them individually;
564 * the order of the tests is specified in the manual
565 *
566 * @param musb instance pointer
567 * @param int_usb register contents
568 * @param devctl
569 * @param power
570 */
571
static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
				u8 devctl)
{
	irqreturn_t handled = IRQ_NONE;

	musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);

	/* in host mode, the peripheral may issue remote wakeup.
	 * in peripheral mode, the host may resume the link.
	 * spurious RESUME irqs happen too, paired with SUSPEND.
	 */
	if (int_usb & MUSB_INTR_RESUME) {
		handled = IRQ_HANDLED;
		musb_dbg(musb, "RESUME (%s)",
				usb_otg_state_string(musb->xceiv->otg->state));

		if (devctl & MUSB_DEVCTL_HM) {
			/* host mode: RESUME means remote wakeup */
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* remote wakeup? */
				musb->port1_status |=
						(USB_PORT_STAT_C_SUSPEND << 16)
						| MUSB_PORT_STAT_RESUME;
				musb->rh_timer = jiffies
					+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb->is_active = 1;
				musb_host_resume_root_hub(musb);
				schedule_delayed_work(&musb->finish_resume_work,
					msecs_to_jiffies(USB_RESUME_TIMEOUT));
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb->is_active = 1;
				MUSB_DEV_MODE(musb);
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"host",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		} else {
			/* peripheral mode: the host resumed the link */
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				/* possibly DISCONNECT is upcoming */
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				musb_host_resume_root_hub(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
			case OTG_STATE_B_PERIPHERAL:
				/* disconnect while suspended? we may
				 * not get a disconnect irq...
				 */
				if ((devctl & MUSB_DEVCTL_VBUS)
						!= (3 << MUSB_DEVCTL_VBUS_SHIFT)
						) {
					/* synthesize a DISCONNECT instead */
					musb->int_usb |= MUSB_INTR_DISCONNECT;
					musb->int_usb &= ~MUSB_INTR_SUSPEND;
					break;
				}
				musb_g_resume(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->int_usb &= ~MUSB_INTR_SUSPEND;
				break;
			default:
				WARNING("bogus %s RESUME (%s)\n",
					"peripheral",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}

	/* see manual for the order of the tests */
	if (int_usb & MUSB_INTR_SESSREQ) {
		void __iomem *mbase = musb->mregs;

		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
			musb_dbg(musb, "SessReq while on B state");
			return IRQ_HANDLED;
		}

		musb_dbg(musb, "SESSION_REQUEST (%s)",
			usb_otg_state_string(musb->xceiv->otg->state));

		/* IRQ arrives from ID pin sense or (later, if VBUS power
		 * is removed) SRP. responses are time critical:
		 *  - turn on VBUS (with silicon-specific mechanism)
		 *  - go through A_WAIT_VRISE
		 *  - ... to A_WAIT_BCON.
		 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
		 */
		musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
		musb->ep0_stage = MUSB_EP0_START;
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
		MUSB_HST_MODE(musb);
		musb_platform_set_vbus(musb, 1);

		handled = IRQ_HANDLED;
	}

	if (int_usb & MUSB_INTR_VBUSERROR) {
		int	ignore = 0;

		/* During connection as an A-Device, we may see a short
		 * current spikes causing voltage drop, because of cable
		 * and peripheral capacitance combined with vbus draw.
		 * (So: less common with truly self-powered devices, where
		 * vbus doesn't act like a power supply.)
		 *
		 * Such spikes are short; usually less than ~500 usec, max
		 * of ~2 msec. That is, they're not sustained overcurrent
		 * errors, though they're reported using VBUSERROR irqs.
		 *
		 * Workarounds: (a) hardware: use self powered devices.
		 * (b) software: ignore non-repeated VBUS errors.
		 *
		 * REVISIT: do delays from lots of DEBUG_KERNEL checks
		 * make trouble here, keeping VBUS < 4.4V ?
		 */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
			/* recovery is dicey once we've gotten past the
			 * initial stages of enumeration, but if VBUS
			 * stayed ok at the other end of the link, and
			 * another reset is due (at least for high speed,
			 * to redo the chirp etc), it might work OK...
			 */
		case OTG_STATE_A_WAIT_BCON:
		case OTG_STATE_A_WAIT_VRISE:
			if (musb->vbuserr_retry) {
				void __iomem *mbase = musb->mregs;

				/* retry: restart the session and ignore */
				musb->vbuserr_retry--;
				ignore = 1;
				devctl |= MUSB_DEVCTL_SESSION;
				musb_writeb(mbase, MUSB_DEVCTL, devctl);
			} else {
				/* retries exhausted: report overcurrent */
				musb->port1_status |=
					  USB_PORT_STAT_OVERCURRENT
					| (USB_PORT_STAT_C_OVERCURRENT << 16);
			}
			break;
		default:
			break;
		}

		dev_printk(ignore ? KERN_DEBUG : KERN_ERR, musb->controller,
				"VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
				usb_otg_state_string(musb->xceiv->otg->state),
				devctl,
				({ char *s;
				switch (devctl & MUSB_DEVCTL_VBUS) {
				case 0 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<SessEnd"; break;
				case 1 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<AValid"; break;
				case 2 << MUSB_DEVCTL_VBUS_SHIFT:
					s = "<VBusValid"; break;
				/* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
				default:
					s = "VALID"; break;
				} s; }),
				VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
				musb->port1_status);

		/* go through A_WAIT_VFALL then start a new session */
		if (!ignore)
			musb_platform_set_vbus(musb, 0);
		handled = IRQ_HANDLED;
	}

	if (int_usb & MUSB_INTR_SUSPEND) {
		musb_dbg(musb, "SUSPEND (%s) devctl %02x",
			usb_otg_state_string(musb->xceiv->otg->state), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_PERIPHERAL:
			/* We also come here if the cable is removed, since
			 * this silicon doesn't report ID-no-longer-grounded.
			 *
			 * We depend on T(a_wait_bcon) to shut us down, and
			 * hope users don't do anything dicey during this
			 * undesired detour through A_WAIT_BCON.
			 */
			musb_hnp_stop(musb);
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon
						? : OTG_TIME_A_WAIT_BCON));

			break;
		case OTG_STATE_B_IDLE:
			if (!musb->is_active)
				break;
			/* fall through: an active B_IDLE link suspends
			 * like B_PERIPHERAL does
			 */
		case OTG_STATE_B_PERIPHERAL:
			musb_g_suspend(musb);
			musb->is_active = musb->g.b_hnp_enable;
			if (musb->is_active) {
				musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
				musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(
							OTG_TIME_B_ASE0_BRST));
			}
			break;
		case OTG_STATE_A_WAIT_BCON:
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_A_HOST:
			musb->xceiv->otg->state = OTG_STATE_A_SUSPEND;
			musb->is_active = musb->hcd->self.b_hnp_enable;
			break;
		case OTG_STATE_B_HOST:
			/* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
			musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
			break;
		default:
			/* "should not happen" */
			musb->is_active = 0;
			break;
		}
	}

	if (int_usb & MUSB_INTR_CONNECT) {
		struct usb_hcd *hcd = musb->hcd;

		handled = IRQ_HANDLED;
		musb->is_active = 1;

		musb->ep0_stage = MUSB_EP0_START;

		/* unmask endpoint interrupts (no RX on ep0) */
		musb->intrtxe = musb->epmask;
		musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe);
		musb->intrrxe = musb->epmask & 0xfffe;
		musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe);
		musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
		musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
					|USB_PORT_STAT_HIGH_SPEED
					|USB_PORT_STAT_ENABLE
					);
		musb->port1_status |= USB_PORT_STAT_CONNECTION
					|(USB_PORT_STAT_C_CONNECTION << 16);

		/* high vs full speed is just a guess until after reset */
		if (devctl & MUSB_DEVCTL_LSDEV)
			musb->port1_status |= USB_PORT_STAT_LOW_SPEED;

		/* indicate new connection to OTG machine */
		switch (musb->xceiv->otg->state) {
		case OTG_STATE_B_PERIPHERAL:
			if (int_usb & MUSB_INTR_SUSPEND) {
				musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
				int_usb &= ~MUSB_INTR_SUSPEND;
				goto b_host;
			} else
				musb_dbg(musb, "CONNECT as b_peripheral???");
			break;
		case OTG_STATE_B_WAIT_ACON:
			musb_dbg(musb, "HNP: CONNECT, now b_host");
b_host:
			musb->xceiv->otg->state = OTG_STATE_B_HOST;
			if (musb->hcd)
				musb->hcd->self.is_b_host = 1;
			del_timer(&musb->otg_timer);
			break;
		default:
			if ((devctl & MUSB_DEVCTL_VBUS)
					== (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
				musb->xceiv->otg->state = OTG_STATE_A_HOST;
				if (hcd)
					hcd->self.is_b_host = 0;
			}
			break;
		}

		musb_host_poke_root_hub(musb);

		musb_dbg(musb, "CONNECT (%s) devctl %02x",
				usb_otg_state_string(musb->xceiv->otg->state),
				devctl);
	}

	if (int_usb & MUSB_INTR_DISCONNECT) {
		musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
				usb_otg_state_string(musb->xceiv->otg->state),
				MUSB_MODE(musb), devctl);
		handled = IRQ_HANDLED;

		switch (musb->xceiv->otg->state) {
		case OTG_STATE_A_HOST:
		case OTG_STATE_A_SUSPEND:
			musb_host_resume_root_hub(musb);
			musb_root_disconnect(musb);
			if (musb->a_wait_bcon != 0)
				musb_platform_try_idle(musb, jiffies
					+ msecs_to_jiffies(musb->a_wait_bcon));
			break;
		case OTG_STATE_B_HOST:
			/* REVISIT this behaves for "real disconnect"
			 * cases; make sure the other transitions from
			 * from B_HOST act right too. The B_HOST code
			 * in hnp_stop() is currently not used...
			 */
			musb_root_disconnect(musb);
			if (musb->hcd)
				musb->hcd->self.is_b_host = 0;
			musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
			MUSB_DEV_MODE(musb);
			musb_g_disconnect(musb);
			break;
		case OTG_STATE_A_PERIPHERAL:
			musb_hnp_stop(musb);
			musb_root_disconnect(musb);
			/* FALLTHROUGH */
		case OTG_STATE_B_WAIT_ACON:
			/* FALLTHROUGH */
		case OTG_STATE_B_PERIPHERAL:
		case OTG_STATE_B_IDLE:
			musb_g_disconnect(musb);
			break;
		default:
			WARNING("unhandled DISCONNECT transition (%s)\n",
				usb_otg_state_string(musb->xceiv->otg->state));
			break;
		}
	}

	/* mentor saves a bit: bus reset and babble share the same irq.
	 * only host sees babble; only peripheral sees bus reset.
	 */
	if (int_usb & MUSB_INTR_RESET) {
		handled = IRQ_HANDLED;
		if (is_host_active(musb)) {
			/*
			 * When BABBLE happens what we can depends on which
			 * platform MUSB is running, because some platforms
			 * implemented proprietary means for 'recovering' from
			 * Babble conditions. One such platform is AM335x. In
			 * most cases, however, the only thing we can do is
			 * drop the session.
			 */
			dev_err(musb->controller, "Babble\n");
			musb_recover_from_babble(musb);
		} else {
			musb_dbg(musb, "BUS RESET as %s",
				usb_otg_state_string(musb->xceiv->otg->state));
			switch (musb->xceiv->otg->state) {
			case OTG_STATE_A_SUSPEND:
				musb_g_reset(musb);
				/* FALLTHROUGH */
			case OTG_STATE_A_WAIT_BCON:	/* OPT TD.4.7-900ms */
				/* never use invalid T(a_wait_bcon) */
				musb_dbg(musb, "HNP: in %s, %d msec timeout",
					usb_otg_state_string(musb->xceiv->otg->state),
					TA_WAIT_BCON(musb));
				mod_timer(&musb->otg_timer, jiffies
					+ msecs_to_jiffies(TA_WAIT_BCON(musb)));
				break;
			case OTG_STATE_A_PERIPHERAL:
				del_timer(&musb->otg_timer);
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_WAIT_ACON:
				musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
					usb_otg_state_string(musb->xceiv->otg->state));
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				musb_g_reset(musb);
				break;
			case OTG_STATE_B_IDLE:
				musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
				/* FALLTHROUGH */
			case OTG_STATE_B_PERIPHERAL:
				musb_g_reset(musb);
				break;
			default:
				musb_dbg(musb, "Unhandled BUS RESET as %s",
					usb_otg_state_string(musb->xceiv->otg->state));
			}
		}
	}

#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
 * supporting transfer phasing to prevent exceeding ISO bandwidth
 * limits of a given frame or microframe.
 *
 * It's not needed for peripheral side, which dedicates endpoints;
 * though it _might_ use SOF irqs for other purposes.
 *
 * And it's not currently needed for host side, which also dedicates
 * endpoints, relies on TX/RX interval registers, and isn't claimed
 * to support ISO transfers yet.
 */
	if (int_usb & MUSB_INTR_SOF) {
		void __iomem *mbase = musb->mregs;
		struct musb_hw_ep	*ep;
		u8 epnum;
		u16 frame;

		dev_dbg(musb->controller, "START_OF_FRAME\n");
		handled = IRQ_HANDLED;

		/* start any periodic Tx transfers waiting for current frame */
		frame = musb_readw(mbase, MUSB_FRAME);
		ep = musb->endpoints;
		for (epnum = 1; (epnum < musb->nr_endpoints)
				&& (musb->epmask >= (1 << epnum));
				epnum++, ep++) {
			/*
			 * FIXME handle framecounter wraps (12 bits)
			 * eliminate duplicated StartUrb logic
			 */
			if (ep->dwWaitFrame >= frame) {
				ep->dwWaitFrame = 0;
				pr_debug("SOF --> periodic TX%s on %d\n",
					ep->tx_channel ? " DMA" : "",
					epnum);
				if (!ep->tx_channel)
					musb_h_tx_start(musb, epnum);
				else
					cppi_hostdma_start(musb, epnum);
			}
		} /* end of for loop */
	}
#endif

	schedule_delayed_work(&musb->irq_work, 0);

	return handled;
}
1007
1008 /*-------------------------------------------------------------------------*/
1009
/* Mask all core interrupt sources and discard anything already pending. */
static void musb_disable_interrupts(struct musb *musb)
{
	void __iomem *mbase = musb->mregs;
	u16 temp;

	/* disable interrupts; keep the cached enable masks in sync */
	musb_writeb(mbase, MUSB_INTRUSBE, 0);
	musb->intrtxe = 0;
	musb_writew(mbase, MUSB_INTRTXE, 0);
	musb->intrrxe = 0;
	musb_writew(mbase, MUSB_INTRRXE, 0);

	/* flush pending interrupts: reads discard the status bits */
	temp = musb_readb(mbase, MUSB_INTRUSB);
	temp = musb_readw(mbase, MUSB_INTRTX);
	temp = musb_readw(mbase, MUSB_INTRRX);
}
1027
/* Enable the core interrupts used during normal operation. */
static void musb_enable_interrupts(struct musb *musb)
{
	void __iomem *regs = musb->mregs;

	/* Set INT enable registers, enable interrupts */
	musb->intrtxe = musb->epmask;		/* all configured endpoints */
	musb_writew(regs, MUSB_INTRTXE, musb->intrtxe);
	musb->intrrxe = musb->epmask & 0xfffe;	/* ep0 has no RX half */
	musb_writew(regs, MUSB_INTRRXE, musb->intrrxe);
	/* 0xf7: all USB interrupts except SOF (unused; see musb_interrupt) */
	musb_writeb(regs, MUSB_INTRUSBE, 0xf7);

}
1040
/*
 * Program the HDRC to start (enable interrupts, dma, etc.).
 * Called with the controller already clocked/powered; the session bit
 * is only set here when no role has claimed the bus yet.
 */
void musb_start(struct musb *musb)
{
	void __iomem *regs = musb->mregs;
	u8 devctl = musb_readb(regs, MUSB_DEVCTL);
	u8 power;

	musb_dbg(musb, "<== devctl %02x", devctl);

	musb_enable_interrupts(musb);
	/* make sure no leftover test mode is active */
	musb_writeb(regs, MUSB_TESTMODE, 0);

	power = MUSB_POWER_ISOUPDATE;
	/*
	 * treating UNKNOWN as unspecified maximum speed, in which case
	 * we will default to high-speed.
	 */
	if (musb->config->maximum_speed == USB_SPEED_HIGH ||
			musb->config->maximum_speed == USB_SPEED_UNKNOWN)
		power |= MUSB_POWER_HSENAB;
	musb_writeb(regs, MUSB_POWER, power);

	musb->is_active = 0;
	/* re-read DEVCTL: it may have changed since function entry */
	devctl = musb_readb(regs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;

	/* session started after:
	 * (a) ID-grounded irq, host mode;
	 * (b) vbus present/connect IRQ, peripheral mode;
	 * (c) peripheral initiates, using SRP
	 */
	if (musb->port_mode != MUSB_PORT_MODE_HOST &&
			musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
			(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
		/* VBUS already fully valid: peripheral session is live */
		musb->is_active = 1;
	} else {
		/* otherwise request a session; IRQs take it from here */
		devctl |= MUSB_DEVCTL_SESSION;
	}

	musb_platform_enable(musb);
	musb_writeb(regs, MUSB_DEVCTL, devctl);
}
1085
/*
 * Make the HDRC stop (disable interrupts, etc.);
 * reversible by musb_start
 * called on gadget driver unregister
 * with controller locked, irqs blocked
 * acts as a NOP unless some role activated the hardware
 */
void musb_stop(struct musb *musb)
{
	/* stop IRQs, timers, ... */
	musb_platform_disable(musb);
	musb_disable_interrupts(musb);
	/* zeroing DEVCTL also drops any active session */
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* FIXME
	 * - mark host and/or peripheral drivers unusable/inactive
	 * - disable DMA (and enable it in HdrcStart)
	 * - make sure we can musb_start() after musb_stop(); with
	 *   OTG mode, gadget driver module rmmod/modprobe cycles that
	 * - ...
	 */
	musb_platform_try_idle(musb, 0);
}
1109
1110 /*-------------------------------------------------------------------------*/
1111
1112 /*
1113 * The silicon either has hard-wired endpoint configurations, or else
1114 * "dynamic fifo" sizing. The driver has support for both, though at this
1115 * writing only the dynamic sizing is very well tested. Since we switched
1116 * away from compile-time hardware parameters, we can no longer rely on
1117 * dead code elimination to leave only the relevant one in the object file.
1118 *
1119 * We don't currently use dynamic fifo setup capability to do anything
1120 * more than selecting one of a bunch of predefined configurations.
1121 */
static ushort fifo_mode;

/* "modprobe ... fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0);
MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");

/*
 * tables defining fifo_mode values. define more if you like.
 * for host side, make sure both halves of ep1 are set up.
 * FIFO_RXTX entries describe one shared fifo serving both directions;
 * separate FIFO_TX/FIFO_RX entries give each direction its own fifo.
 */

/* mode 0 - fits in 2KB */
static struct musb_fifo_cfg mode_0_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 1 - fits in 4KB */
static struct musb_fifo_cfg mode_1_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 2 - fits in 4KB */
static struct musb_fifo_cfg mode_2_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 960, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/* mode 3 - fits in 4KB */
static struct musb_fifo_cfg mode_3_cfg[] = {
{ .hw_ep_num = 1, .style = FIFO_TX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 1, .style = FIFO_RX,   .maxpacket = 512, .mode = BUF_DOUBLE, },
{ .hw_ep_num = 2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num = 2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
{ .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
};

/* mode 4 - fits in 16KB */
static struct musb_fifo_cfg mode_4_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 256, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 64, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};

/* mode 5 - fits in 8KB */
static struct musb_fifo_cfg mode_5_cfg[] = {
{ .hw_ep_num =  1, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  1, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  2, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  3, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  4, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_TX,   .maxpacket = 512, },
{ .hw_ep_num =  5, .style = FIFO_RX,   .maxpacket = 512, },
{ .hw_ep_num =  6, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  6, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  7, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  8, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num =  9, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 10, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 11, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_TX,   .maxpacket = 32, },
{ .hw_ep_num = 12, .style = FIFO_RX,   .maxpacket = 32, },
{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
1232
/*
 * configure a fifo; for non-shared endpoints, this may be called
 * once for a tx fifo and once for an rx fifo.
 *
 * returns negative errno or offset for next fifo.
 */
static int
fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
		const struct musb_fifo_cfg *cfg, u16 offset)
{
	void __iomem *mbase = musb->mregs;
	int size = 0;
	u16 maxpacket = cfg->maxpacket;
	u16 c_off = offset >> 3;	/* fifo address regs count 8-byte units */
	u8 c_size;

	/* expect hw_ep has already been zero-initialized */

	/*
	 * derive size from maxpacket (minimum 8 bytes); NOTE(review):
	 * ffs() keys off the LOWEST set bit, so a non-power-of-two value
	 * (e.g. 960 in mode_2_cfg) collapses to its lowest set bit --
	 * confirm table entries are meant to be powers of two
	 */
	size = ffs(max(maxpacket, (u16) 8)) - 1;
	maxpacket = 1 << size;

	/* hardware encodes fifo size as log2(bytes) - 3 */
	c_size = size - 3;
	if (cfg->mode == BUF_DOUBLE) {
		/* double buffering needs twice the space in fifo RAM */
		if ((offset + (maxpacket << 1)) >
				(1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
		c_size |= MUSB_FIFOSZ_DPB;
	} else {
		if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
			return -EMSGSIZE;
	}

	/* configure the FIFO; the size/addr registers below are indexed */
	musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);

	/* EP0 reserved endpoint for control, bidirectional;
	 * EP1 reserved for bulk, two unidirectional halves.
	 */
	if (hw_ep->epnum == 1)
		musb->bulk_ep = hw_ep;
	/* REVISIT error check:  be sure ep0 can both rx and tx ... */
	switch (cfg->style) {
	case FIFO_TX:
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_tx = maxpacket;
		break;
	case FIFO_RX:
		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;
		break;
	case FIFO_RXTX:
		/* shared fifo: both directions get the same size and base */
		musb_write_txfifosz(mbase, c_size);
		musb_write_txfifoadd(mbase, c_off);
		hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
		hw_ep->max_packet_sz_rx = maxpacket;

		musb_write_rxfifosz(mbase, c_size);
		musb_write_rxfifoadd(mbase, c_off);
		hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
		hw_ep->max_packet_sz_tx = maxpacket;

		hw_ep->is_shared_fifo = true;
		break;
	}

	/* NOTE rx and tx endpoint irqs aren't managed separately,
	 * which happens to be ok
	 */
	musb->epmask |= (1 << hw_ep->epnum);

	return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
}
1309
/* fixed configuration for the bidirectional control endpoint (ep0) */
static struct musb_fifo_cfg ep0_cfg = {
	.style = FIFO_RXTX, .maxpacket = 64,
};
1313
ep_config_from_table(struct musb * musb)1314 static int ep_config_from_table(struct musb *musb)
1315 {
1316 const struct musb_fifo_cfg *cfg;
1317 unsigned i, n;
1318 int offset;
1319 struct musb_hw_ep *hw_ep = musb->endpoints;
1320
1321 if (musb->config->fifo_cfg) {
1322 cfg = musb->config->fifo_cfg;
1323 n = musb->config->fifo_cfg_size;
1324 goto done;
1325 }
1326
1327 switch (fifo_mode) {
1328 default:
1329 fifo_mode = 0;
1330 /* FALLTHROUGH */
1331 case 0:
1332 cfg = mode_0_cfg;
1333 n = ARRAY_SIZE(mode_0_cfg);
1334 break;
1335 case 1:
1336 cfg = mode_1_cfg;
1337 n = ARRAY_SIZE(mode_1_cfg);
1338 break;
1339 case 2:
1340 cfg = mode_2_cfg;
1341 n = ARRAY_SIZE(mode_2_cfg);
1342 break;
1343 case 3:
1344 cfg = mode_3_cfg;
1345 n = ARRAY_SIZE(mode_3_cfg);
1346 break;
1347 case 4:
1348 cfg = mode_4_cfg;
1349 n = ARRAY_SIZE(mode_4_cfg);
1350 break;
1351 case 5:
1352 cfg = mode_5_cfg;
1353 n = ARRAY_SIZE(mode_5_cfg);
1354 break;
1355 }
1356
1357 pr_debug("%s: setup fifo_mode %d\n", musb_driver_name, fifo_mode);
1358
1359
1360 done:
1361 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1362 /* assert(offset > 0) */
1363
1364 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
1365 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1366 */
1367
1368 for (i = 0; i < n; i++) {
1369 u8 epn = cfg->hw_ep_num;
1370
1371 if (epn >= musb->config->num_eps) {
1372 pr_debug("%s: invalid ep %d\n",
1373 musb_driver_name, epn);
1374 return -EINVAL;
1375 }
1376 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1377 if (offset < 0) {
1378 pr_debug("%s: mem overrun, ep %d\n",
1379 musb_driver_name, epn);
1380 return offset;
1381 }
1382 epn++;
1383 musb->nr_endpoints = max(epn, musb->nr_endpoints);
1384 }
1385
1386 pr_debug("%s: %d/%d max ep, %d/%d memory\n",
1387 musb_driver_name,
1388 n + 1, musb->config->num_eps * 2 - 1,
1389 offset, (1 << (musb->config->ram_bits + 2)));
1390
1391 if (!musb->bulk_ep) {
1392 pr_debug("%s: missing bulk\n", musb_driver_name);
1393 return -EINVAL;
1394 }
1395
1396 return 0;
1397 }
1398
1399
/*
 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
 * @param musb the controller
 *
 * Reads the hard-wired FIFO sizes out of the silicon and picks a
 * bulk-capable endpoint.  Returns 0 on success, negative errno when no
 * endpoint with >= 512-byte FIFOs in both directions exists.
 */
static int ep_config_from_hw(struct musb *musb)
{
	u8 epnum = 0;
	struct musb_hw_ep *hw_ep;
	void __iomem *mbase = musb->mregs;
	int ret = 0;

	musb_dbg(musb, "<== static silicon ep config");

	/* FIXME pick up ep0 maxpacket size */

	for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
		musb_ep_select(mbase, epnum);
		hw_ep = musb->endpoints + epnum;

		/* NOTE(review): an error here only stops the scan; it is
		 * not propagated if a bulk endpoint was already found
		 */
		ret = musb_read_fifosize(musb, hw_ep, epnum);
		if (ret < 0)
			break;

		/* FIXME set up hw_ep->{rx,tx}_double_buffered */

		/* pick an RX/TX endpoint for bulk */
		if (hw_ep->max_packet_sz_tx < 512
				|| hw_ep->max_packet_sz_rx < 512)
			continue;

		/* REVISIT:  this algorithm is lazy, we should at least
		 * try to pick a double buffered endpoint.
		 */
		if (musb->bulk_ep)
			continue;
		musb->bulk_ep = hw_ep;
	}

	if (!musb->bulk_ep) {
		pr_debug("%s: missing bulk\n", musb_driver_name);
		return -EINVAL;
	}

	return 0;
}
1445
/* controller flavors; MHDRC is the multipoint (hub-capable) variant */
enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };

/* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
 * configure endpoints, or take their config from silicon
 */
static int musb_core_init(u16 musb_type, struct musb *musb)
{
	u8 reg;
	char *type;
	char aInfo[90];		/* sized for the option strings built below */
	void __iomem *mbase = musb->mregs;
	int status = 0;
	int i;

	/* log core options (read using indexed model) */
	reg = musb_read_configdata(mbase);

	strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
	if (reg & MUSB_CONFIGDATA_DYNFIFO) {
		strcat(aInfo, ", dyn FIFOs");
		musb->dyn_fifo = true;
	}
	if (reg & MUSB_CONFIGDATA_MPRXE) {
		strcat(aInfo, ", bulk combine");
		musb->bulk_combine = true;
	}
	if (reg & MUSB_CONFIGDATA_MPTXE) {
		strcat(aInfo, ", bulk split");
		musb->bulk_split = true;
	}
	if (reg & MUSB_CONFIGDATA_HBRXE) {
		strcat(aInfo, ", HB-ISO Rx");
		musb->hb_iso_rx = true;
	}
	if (reg & MUSB_CONFIGDATA_HBTXE) {
		strcat(aInfo, ", HB-ISO Tx");
		musb->hb_iso_tx = true;
	}
	if (reg & MUSB_CONFIGDATA_SOFTCONE)
		strcat(aInfo, ", SoftConn");

	pr_debug("%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo);

	if (MUSB_CONTROLLER_MHDRC == musb_type) {
		musb->is_multipoint = 1;
		type = "M";
	} else {
		musb->is_multipoint = 0;
		type = "";
#ifndef CONFIG_USB_OTG_BLACKLIST_HUB
		pr_err("%s: kernel must blacklist external hubs\n",
		       musb_driver_name);
#endif
	}

	/* log release info */
	musb->hwvers = musb_read_hwvers(mbase);
	pr_debug("%s: %sHDRC RTL version %d.%d%s\n",
		 musb_driver_name, type, MUSB_HWVERS_MAJOR(musb->hwvers),
		 MUSB_HWVERS_MINOR(musb->hwvers),
		 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");

	/* configure ep0 */
	musb_configure_ep0(musb);

	/* discover endpoint configuration; ep0 is always present */
	musb->nr_endpoints = 1;
	musb->epmask = 1;

	if (musb->dyn_fifo)
		status = ep_config_from_table(musb);
	else
		status = ep_config_from_hw(musb);

	if (status < 0)
		return status;

	/* finish init, and print endpoint config */
	for (i = 0; i < musb->nr_endpoints; i++) {
		struct musb_hw_ep *hw_ep = musb->endpoints + i;

		hw_ep->fifo = musb->io.fifo_offset(i) + mbase;
#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
		/* TUSB6010 maps FIFOs into async and sync memory regions */
		if (musb->io.quirks & MUSB_IN_TUSB) {
			hw_ep->fifo_async = musb->async + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync = musb->sync + 0x400 +
				musb->io.fifo_offset(i);
			hw_ep->fifo_sync_va =
				musb->sync_va + 0x400 + musb->io.fifo_offset(i);

			if (i == 0)
				hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
			else
				hw_ep->conf = mbase + 0x400 +
					(((i - 1) & 0xf) << 2);
		}
#endif

		hw_ep->regs = musb->io.ep_offset(i, 0) + mbase;
		hw_ep->rx_reinit = 1;
		hw_ep->tx_reinit = 1;

		if (hw_ep->max_packet_sz_tx) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				hw_ep->is_shared_fifo ? "shared" : "tx",
				hw_ep->tx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_tx);
		}
		if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
			musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
				musb_driver_name, i,
				"rx",
				hw_ep->rx_double_buffered
					? "doublebuffer, " : "",
				hw_ep->max_packet_sz_rx);
		}
		if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
			musb_dbg(musb, "hw_ep %d not configured", i);
	}

	return 0;
}
1571
1572 /*-------------------------------------------------------------------------*/
1573
/*
 * handle all the irqs defined by the HDRC core. for now we expect:  other
 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
 * will be assigned, and the irq will already have been acked.
 *
 * called in irq context with spinlock held, irqs blocked
 */
irqreturn_t musb_interrupt(struct musb *musb)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned long status;
	unsigned long epnum;
	u8 devctl;

	if (!musb->int_usb && !musb->int_tx && !musb->int_rx)
		return IRQ_NONE;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	trace_musb_isr(musb);

	/**
	 * According to Mentor Graphics' documentation, flowchart on page 98,
	 * IRQ should be handled as follows:
	 *
	 * . Resume IRQ
	 * . Session Request IRQ
	 * . VBUS Error IRQ
	 * . Suspend IRQ
	 * . Connect IRQ
	 * . Disconnect IRQ
	 * . Reset/Babble IRQ
	 * . SOF IRQ (we're not using this one)
	 * . Endpoint 0 IRQ
	 * . TX Endpoints
	 * . RX Endpoints
	 *
	 * We will be following that flowchart in order to avoid any problems
	 * that might arise with internal Finite State Machine.
	 */

	if (musb->int_usb)
		retval |= musb_stage0_irq(musb, musb->int_usb, devctl);

	/* TX bit 0 is the control endpoint; handle it before the others */
	if (musb->int_tx & 1) {
		if (is_host_active(musb))
			retval |= musb_h_ep0_irq(musb);
		else
			retval |= musb_g_ep0_irq(musb);

		/* we have just handled endpoint 0 IRQ, clear it */
		musb->int_tx &= ~BIT(0);
	}

	/* copy to a local word so for_each_set_bit() has a stable operand */
	status = musb->int_tx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_tx(musb, epnum);
		else
			musb_g_tx(musb, epnum);
	}

	status = musb->int_rx;

	for_each_set_bit(epnum, &status, 16) {
		retval = IRQ_HANDLED;
		if (is_host_active(musb))
			musb_host_rx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(musb_interrupt);
1651
1652 #ifndef CONFIG_MUSB_PIO_ONLY
/* DMA enabled by default; runtime-writable via module sysfs (mode 0644) */
static bool use_dma = 1;

/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1658
/*
 * DMA completion callback: route a finished transfer on @epnum to the
 * proper host- or gadget-side completion handler.  @transmit is nonzero
 * for the TX direction.  Called with the controller lock already held.
 */
void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
{
	if (!epnum) {
		/* endpoint 0; CPPI completes ep0 through its own path */
		if (is_cppi_enabled(musb))
			return;
		if (is_host_active(musb))
			musb_h_ep0_irq(musb);
		else
			musb_g_ep0_irq(musb);
		return;
	}

	/* endpoints 1..15: dispatch on role, then on direction */
	if (is_host_active(musb)) {
		if (transmit)
			musb_host_tx(musb, epnum);
		else
			musb_host_rx(musb, epnum);
	} else {
		if (transmit)
			musb_g_tx(musb, epnum);
		else
			musb_g_rx(musb, epnum);
	}
}
EXPORT_SYMBOL_GPL(musb_dma_completion);
1688
1689 #else
1690 #define use_dma 0
1691 #endif
1692
/* PHY state-change handler registered by glue code, if any (see musb_mailbox) */
static int (*musb_phy_callback)(enum musb_vbus_id_status status);
1694
1695 /*
1696 * musb_mailbox - optional phy notifier function
1697 * @status phy state change
1698 *
1699 * Optionally gets called from the USB PHY. Note that the USB PHY must be
1700 * disabled at the point the phy_callback is registered or unregistered.
1701 */
musb_mailbox(enum musb_vbus_id_status status)1702 int musb_mailbox(enum musb_vbus_id_status status)
1703 {
1704 if (musb_phy_callback)
1705 return musb_phy_callback(status);
1706
1707 return -ENODEV;
1708 };
1709 EXPORT_SYMBOL_GPL(musb_mailbox);
1710
1711 /*-------------------------------------------------------------------------*/
1712
1713 static ssize_t
musb_mode_show(struct device * dev,struct device_attribute * attr,char * buf)1714 musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1715 {
1716 struct musb *musb = dev_to_musb(dev);
1717 unsigned long flags;
1718 int ret = -EINVAL;
1719
1720 spin_lock_irqsave(&musb->lock, flags);
1721 ret = sprintf(buf, "%s\n", usb_otg_state_string(musb->xceiv->otg->state));
1722 spin_unlock_irqrestore(&musb->lock, flags);
1723
1724 return ret;
1725 }
1726
/* sysfs "mode" write: switch the controller to host/peripheral/otg */
static ssize_t
musb_mode_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int status;

	spin_lock_irqsave(&musb->lock, flags);
	if (sysfs_streq(buf, "host"))
		status = musb_platform_set_mode(musb, MUSB_HOST);
	else if (sysfs_streq(buf, "peripheral"))
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
	else if (sysfs_streq(buf, "otg"))
		status = musb_platform_set_mode(musb, MUSB_OTG);
	else
		status = -EINVAL;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* on success report the whole buffer as consumed */
	return (status == 0) ? n : status;
}
static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
1749
/* sysfs "vbus" write: set the VBUS timeout T(a_wait_bcon) in msec */
static ssize_t
musb_vbus_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t n)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	unsigned long val;

	if (sscanf(buf, "%lu", &val) < 1) {
		dev_err(dev, "Invalid VBUS timeout ms value\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&musb->lock, flags);
	/* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
	musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
	if (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON)
		musb->is_active = 0;
	musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
	spin_unlock_irqrestore(&musb->lock, flags);

	return n;
}
1773
/* sysfs "vbus" read: report VBUS state and the configured timeout */
static ssize_t
musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	unsigned long val;
	int vbus;
	u8 devctl;

	/* registers need the controller resumed before they can be read */
	pm_runtime_get_sync(dev);
	spin_lock_irqsave(&musb->lock, flags);
	val = musb->a_wait_bcon;
	vbus = musb_platform_get_vbus_status(musb);
	if (vbus < 0) {
		/* Use default MUSB method by means of DEVCTL register */
		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if ((devctl & MUSB_DEVCTL_VBUS)
				== (3 << MUSB_DEVCTL_VBUS_SHIFT))
			vbus = 1;
		else
			vbus = 0;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
	pm_runtime_put_sync(dev);

	return sprintf(buf, "Vbus %s, timeout %lu msec\n",
			vbus ? "on" : "off", val);
}
static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
1803
1804 /* Gadget drivers can't know that a host is connected so they might want
1805 * to start SRP, but users can. This allows userspace to trigger SRP.
1806 */
1807 static ssize_t
musb_srp_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t n)1808 musb_srp_store(struct device *dev, struct device_attribute *attr,
1809 const char *buf, size_t n)
1810 {
1811 struct musb *musb = dev_to_musb(dev);
1812 unsigned short srp;
1813
1814 if (sscanf(buf, "%hu", &srp) != 1
1815 || (srp != 1)) {
1816 dev_err(dev, "SRP: Value must be 1\n");
1817 return -EINVAL;
1818 }
1819
1820 if (srp == 1)
1821 musb_g_wakeup(musb);
1822
1823 return n;
1824 }
1825 static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1826
/* sysfs attributes exposed on the controller device */
static struct attribute *musb_attributes[] = {
	&dev_attr_mode.attr,
	&dev_attr_vbus.attr,
	&dev_attr_srp.attr,
	NULL
};

static const struct attribute_group musb_attr_group = {
	.attrs = musb_attributes,
};

/* DEVCTL bit patterns (with FSDEV/LSDEV/HR masked off, see
 * musb_pm_runtime_check_session) that need quirk handling
 */
#define MUSB_QUIRK_B_INVALID_VBUS_91	(MUSB_DEVCTL_BDEVICE | \
					 (2 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_B_DISCONNECT_99	(MUSB_DEVCTL_BDEVICE | \
					 (3 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
#define MUSB_QUIRK_A_DISCONNECT_19	((3 << MUSB_DEVCTL_VBUS_SHIFT) | \
					 MUSB_DEVCTL_SESSION)
1846
/*
 * Check the musb devctl session bit to determine if we want to
 * allow PM runtime for the device. In general, we want to keep things
 * active when the session bit is set except after host disconnect.
 *
 * Only called from musb_irq_work. If this ever needs to get called
 * elsewhere, proper locking must be implemented for musb->session.
 */
static void musb_pm_runtime_check_session(struct musb *musb)
{
	u8 devctl, s;
	int error;

	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);

	/* Handle session status quirks first; mask off the speed and
	 * host-request bits before comparing against quirk patterns
	 */
	s = MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV |
		MUSB_DEVCTL_HR;
	switch (devctl & ~s) {
	case MUSB_QUIRK_B_DISCONNECT_99:
		musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
		schedule_delayed_work(&musb->irq_work,
				      msecs_to_jiffies(1000));
		break;
	case MUSB_QUIRK_B_INVALID_VBUS_91:
		if (musb->quirk_retries && !musb->flush_irq_work) {
			musb_dbg(musb,
				 "Poll devctl on invalid vbus, assume no session");
			/* re-check in a second, a bounded number of times */
			schedule_delayed_work(&musb->irq_work,
					      msecs_to_jiffies(1000));
			musb->quirk_retries--;
			return;
		}
		/* fall through */
	case MUSB_QUIRK_A_DISCONNECT_19:
		if (musb->quirk_retries && !musb->flush_irq_work) {
			musb_dbg(musb,
				 "Poll devctl on possible host mode disconnect");
			schedule_delayed_work(&musb->irq_work,
					      msecs_to_jiffies(1000));
			musb->quirk_retries--;
			return;
		}
		if (!musb->session)
			break;
		musb_dbg(musb, "Allow PM on possible host mode disconnect");
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
		musb->session = false;
		return;
	default:
		break;
	}

	/* No need to do anything if session has not changed */
	s = devctl & MUSB_DEVCTL_SESSION;
	if (s == musb->session)
		return;

	/* Block PM or allow PM? */
	if (s) {
		musb_dbg(musb, "Block PM on active session: %02x", devctl);
		error = pm_runtime_get_sync(musb->controller);
		if (error < 0)
			dev_err(musb->controller, "Could not enable: %i\n",
				error);
		/* re-arm the quirk polling budget for the new session */
		musb->quirk_retries = 3;
	} else {
		musb_dbg(musb, "Allow PM with no session: %02x", devctl);
		pm_runtime_mark_last_busy(musb->controller);
		pm_runtime_put_autosuspend(musb->controller);
	}

	musb->session = s;
}
1922
1923 /* Only used to provide driver mode change events */
musb_irq_work(struct work_struct * data)1924 static void musb_irq_work(struct work_struct *data)
1925 {
1926 struct musb *musb = container_of(data, struct musb, irq_work.work);
1927 int error;
1928
1929 error = pm_runtime_get_sync(musb->controller);
1930 if (error < 0) {
1931 dev_err(musb->controller, "Could not enable: %i\n", error);
1932
1933 return;
1934 }
1935
1936 musb_pm_runtime_check_session(musb);
1937
1938 if (musb->xceiv->otg->state != musb->xceiv_old_state) {
1939 musb->xceiv_old_state = musb->xceiv->otg->state;
1940 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1941 }
1942
1943 pm_runtime_mark_last_busy(musb->controller);
1944 pm_runtime_put_autosuspend(musb->controller);
1945 }
1946
/* Recover the controller after a babble interrupt: quiesce, let the
 * platform glue recover, drop the session, and restart if possible.
 */
static void musb_recover_from_babble(struct musb *musb)
{
	int ret;
	u8 devctl;

	musb_disable_interrupts(musb);

	/*
	 * wait at least 320 cycles of 60MHz clock. That's 5.3us, we will give
	 * it some slack and wait for 10us.
	 */
	udelay(10);

	ret = musb_platform_recover(musb);
	if (ret) {
		/* glue layer could not recover; re-enable irqs and bail */
		musb_enable_interrupts(musb);
		return;
	}

	/* drop session bit */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	devctl &= ~MUSB_DEVCTL_SESSION;
	musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

	/* tell usbcore about it */
	musb_root_disconnect(musb);

	/*
	 * When a babble condition occurs, the musb controller
	 * removes the session bit and the endpoint config is lost.
	 */
	if (musb->dyn_fifo)
		ret = ep_config_from_table(musb);
	else
		ret = ep_config_from_hw(musb);

	/* restart session */
	if (ret == 0)
		musb_start(musb);
}
1987
1988 /* --------------------------------------------------------------------------
1989 * Init support
1990 */
1991
/*
 * Allocate and minimally initialize a struct musb for @dev. All memory
 * is device-managed, so nothing needs explicit freeing on the error
 * paths or on driver unbind. Returns NULL on any failure.
 */
static struct musb *allocate_instance(struct device *dev,
		const struct musb_hdrc_config *config, void __iomem *mbase)
{
	struct musb *musb;
	struct musb_hw_ep *hw_ep;
	int i;

	musb = devm_kzalloc(dev, sizeof(*musb), GFP_KERNEL);
	if (!musb)
		return NULL;

	INIT_LIST_HEAD(&musb->control);
	INIT_LIST_HEAD(&musb->in_bulk);
	INIT_LIST_HEAD(&musb->out_bulk);
	INIT_LIST_HEAD(&musb->pending_list);

	musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
	musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
	musb->mregs = mbase;
	musb->ctrl_base = mbase;
	musb->nIrq = -ENODEV;		/* no IRQ requested yet */
	musb->config = config;
	BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);

	/* Point each hardware endpoint back at its controller */
	for (i = 0; i < musb->config->num_eps; i++) {
		hw_ep = &musb->endpoints[i];
		hw_ep->musb = musb;
		hw_ep->epnum = i;
	}

	musb->controller = dev;

	if (musb_host_alloc(musb) < 0)
		return NULL;

	dev_set_drvdata(dev, musb);

	return musb;
}
2036
static void musb_free(struct musb *musb)
{
	/* this has multiple entry modes. it handles fault cleanup after
	 * probe(), where things may be partially set up, as well as rmmod
	 * cleanup after everything's been de-activated.
	 */

#ifdef CONFIG_SYSFS
	sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif

	/* nIrq starts at -ENODEV; >= 0 means request_irq() succeeded */
	if (musb->nIrq >= 0) {
		if (musb->irq_wake)
			disable_irq_wake(musb->nIrq);
		free_irq(musb->nIrq, musb);
	}

	musb_host_free(musb);
}
2056
/*
 * A callback deferred until the controller is runtime resumed. Queued
 * on musb->pending_list by musb_queue_resume_work() and drained by
 * musb_run_resume_work().
 */
struct musb_pending_work {
	int (*callback)(struct musb *musb, void *data);	/* work to run on resume */
	void *data;					/* opaque argument for callback */
	struct list_head node;				/* link in musb->pending_list */
};
2062
2063 #ifdef CONFIG_PM
2064 /*
2065 * Called from musb_runtime_resume(), musb_resume(), and
2066 * musb_queue_resume_work(). Callers must take musb->lock.
2067 */
musb_run_resume_work(struct musb * musb)2068 static int musb_run_resume_work(struct musb *musb)
2069 {
2070 struct musb_pending_work *w, *_w;
2071 unsigned long flags;
2072 int error = 0;
2073
2074 spin_lock_irqsave(&musb->list_lock, flags);
2075 list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
2076 if (w->callback) {
2077 error = w->callback(musb, w->data);
2078 if (error < 0) {
2079 dev_err(musb->controller,
2080 "resume callback %p failed: %i\n",
2081 w->callback, error);
2082 }
2083 }
2084 list_del(&w->node);
2085 devm_kfree(musb->controller, w);
2086 }
2087 spin_unlock_irqrestore(&musb->list_lock, flags);
2088
2089 return error;
2090 }
2091 #endif
2092
2093 /*
2094 * Called to run work if device is active or else queue the work to happen
2095 * on resume. Caller must take musb->lock and must hold an RPM reference.
2096 *
2097 * Note that we cowardly refuse queuing work after musb PM runtime
2098 * resume is done calling musb_run_resume_work() and return -EINPROGRESS
2099 * instead.
2100 */
musb_queue_resume_work(struct musb * musb,int (* callback)(struct musb * musb,void * data),void * data)2101 int musb_queue_resume_work(struct musb *musb,
2102 int (*callback)(struct musb *musb, void *data),
2103 void *data)
2104 {
2105 struct musb_pending_work *w;
2106 unsigned long flags;
2107 int error;
2108
2109 if (WARN_ON(!callback))
2110 return -EINVAL;
2111
2112 if (pm_runtime_active(musb->controller))
2113 return callback(musb, data);
2114
2115 w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
2116 if (!w)
2117 return -ENOMEM;
2118
2119 w->callback = callback;
2120 w->data = data;
2121 spin_lock_irqsave(&musb->list_lock, flags);
2122 if (musb->is_runtime_suspended) {
2123 list_add_tail(&w->node, &musb->pending_list);
2124 error = 0;
2125 } else {
2126 dev_err(musb->controller, "could not add resume work %p\n",
2127 callback);
2128 devm_kfree(musb->controller, w);
2129 error = -EINPROGRESS;
2130 }
2131 spin_unlock_irqrestore(&musb->list_lock, flags);
2132
2133 return error;
2134 }
2135 EXPORT_SYMBOL_GPL(musb_queue_resume_work);
2136
musb_deassert_reset(struct work_struct * work)2137 static void musb_deassert_reset(struct work_struct *work)
2138 {
2139 struct musb *musb;
2140 unsigned long flags;
2141
2142 musb = container_of(work, struct musb, deassert_reset_work.work);
2143
2144 spin_lock_irqsave(&musb->lock, flags);
2145
2146 if (musb->port1_status & USB_PORT_STAT_RESET)
2147 musb_port_reset(musb, false);
2148
2149 spin_unlock_irqrestore(&musb->lock, flags);
2150 }
2151
2152 /*
2153 * Perform generic per-controller initialization.
2154 *
2155 * @dev: the controller (already clocked, etc)
2156 * @nIrq: IRQ number
2157 * @ctrl: virtual address of controller registers,
2158 * not yet corrected for platform-specific offsets
2159 */
static int
musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
	int status;
	struct musb *musb;
	struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);

	/* The driver might handle more features than the board; OK.
	 * Fail when the board needs a feature that's not enabled.
	 */
	if (!plat) {
		dev_err(dev, "no platform_data?\n");
		status = -ENODEV;
		goto fail0;
	}

	/* allocate */
	musb = allocate_instance(dev, plat->config, ctrl);
	if (!musb) {
		status = -ENOMEM;
		goto fail0;
	}

	spin_lock_init(&musb->lock);
	spin_lock_init(&musb->list_lock);
	musb->board_set_power = plat->set_power;
	musb->min_power = plat->min_power;
	musb->ops = plat->platform_ops;
	musb->port_mode = plat->mode;

	/*
	 * Initialize the default IO functions. At least omap2430 needs
	 * these early. We initialize the platform specific IO functions
	 * later on.
	 */
	musb_readb = musb_default_readb;
	musb_writeb = musb_default_writeb;
	musb_readw = musb_default_readw;
	musb_writew = musb_default_writew;
	musb_readl = musb_default_readl;
	musb_writel = musb_default_writel;

	/* The musb_platform_init() call:
	 *   - adjusts musb->mregs
	 *   - sets the musb->isr
	 *   - may initialize an integrated transceiver
	 *   - initializes musb->xceiv, usually by otg_get_phy()
	 *   - stops powering VBUS
	 *
	 * There are various transceiver configurations.  Blackfin,
	 * DaVinci, TUSB60x0, and others integrate them.  OMAP3 uses
	 * external/discrete ones in various flavors (twl4030 family,
	 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
	 */
	status = musb_platform_init(musb);
	if (status < 0)
		goto fail1;

	if (!musb->isr) {
		status = -ENODEV;
		goto fail2;
	}

	if (musb->ops->quirks)
		musb->io.quirks = musb->ops->quirks;

	/* Most devices use indexed offset or flat offset */
	if (musb->io.quirks & MUSB_INDEXED_EP) {
		musb->io.ep_offset = musb_indexed_ep_offset;
		musb->io.ep_select = musb_indexed_ep_select;
	} else {
		musb->io.ep_offset = musb_flat_ep_offset;
		musb->io.ep_select = musb_flat_ep_select;
	}

	if (musb->io.quirks & MUSB_G_NO_SKB_RESERVE)
		musb->g.quirk_avoids_skb_reserve = 1;

	/* At least tusb6010 has its own offsets */
	if (musb->ops->ep_offset)
		musb->io.ep_offset = musb->ops->ep_offset;
	if (musb->ops->ep_select)
		musb->io.ep_select = musb->ops->ep_select;

	if (musb->ops->fifo_mode)
		fifo_mode = musb->ops->fifo_mode;
	else
		fifo_mode = 4;

	if (musb->ops->fifo_offset)
		musb->io.fifo_offset = musb->ops->fifo_offset;
	else
		musb->io.fifo_offset = musb_default_fifo_offset;

	if (musb->ops->busctl_offset)
		musb->io.busctl_offset = musb->ops->busctl_offset;
	else
		musb->io.busctl_offset = musb_default_busctl_offset;

	/* Glue-layer register accessors override the defaults set above */
	if (musb->ops->readb)
		musb_readb = musb->ops->readb;
	if (musb->ops->writeb)
		musb_writeb = musb->ops->writeb;
	if (musb->ops->readw)
		musb_readw = musb->ops->readw;
	if (musb->ops->writew)
		musb_writew = musb->ops->writew;
	if (musb->ops->readl)
		musb_readl = musb->ops->readl;
	if (musb->ops->writel)
		musb_writel = musb->ops->writel;

#ifndef CONFIG_MUSB_PIO_ONLY
	if (!musb->ops->dma_init || !musb->ops->dma_exit) {
		dev_err(dev, "DMA controller not set\n");
		status = -ENODEV;
		goto fail2;
	}
	musb_dma_controller_create = musb->ops->dma_init;
	musb_dma_controller_destroy = musb->ops->dma_exit;
#endif

	if (musb->ops->read_fifo)
		musb->io.read_fifo = musb->ops->read_fifo;
	else
		musb->io.read_fifo = musb_default_read_fifo;

	if (musb->ops->write_fifo)
		musb->io.write_fifo = musb->ops->write_fifo;
	else
		musb->io.write_fifo = musb_default_write_fifo;

	/* Default ULPI access ops unless the PHY installed its own */
	if (!musb->xceiv->io_ops) {
		musb->xceiv->io_dev = musb->controller;
		musb->xceiv->io_priv = musb->mregs;
		musb->xceiv->io_ops = &musb_ulpi_access;
	}

	if (musb->ops->phy_callback)
		musb_phy_callback = musb->ops->phy_callback;

	/*
	 * We need musb_read/write functions initialized for PM.
	 * Note that at least 2430 glue needs autosuspend delay
	 * somewhere above 300 ms for the hardware to idle properly
	 * after disconnecting the cable in host mode. Let's use
	 * 500 ms for some margin.
	 */
	pm_runtime_use_autosuspend(musb->controller);
	pm_runtime_set_autosuspend_delay(musb->controller, 500);
	pm_runtime_enable(musb->controller);
	pm_runtime_get_sync(musb->controller);

	status = usb_phy_init(musb->xceiv);
	if (status < 0)
		goto err_usb_phy_init;

	if (use_dma && dev->dma_mask) {
		musb->dma_controller =
			musb_dma_controller_create(musb, musb->mregs);
		if (IS_ERR(musb->dma_controller)) {
			status = PTR_ERR(musb->dma_controller);
			goto fail2_5;
		}
	}

	/* be sure interrupts are disabled before connecting ISR */
	musb_platform_disable(musb);
	musb_disable_interrupts(musb);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* MUSB_POWER_SOFTCONN might be already set, JZ4740 does this. */
	musb_writeb(musb->mregs, MUSB_POWER, 0);

	/* Init IRQ workqueue before request_irq */
	INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
	INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
	INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);

	/* setup musb parts of the core (especially endpoints) */
	status = musb_core_init(plat->config->multipoint
			? MUSB_CONTROLLER_MHDRC
			: MUSB_CONTROLLER_HDRC, musb);
	if (status < 0)
		goto fail3;

	setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);

	/* attach to the IRQ */
	if (request_irq(nIrq, musb->isr, IRQF_SHARED, dev_name(dev), musb)) {
		dev_err(dev, "request_irq %d failed!\n", nIrq);
		status = -ENODEV;
		goto fail3;
	}
	/* from here on musb_free() will release the IRQ */
	musb->nIrq = nIrq;
	/* FIXME this handles wakeup irqs wrong */
	if (enable_irq_wake(nIrq) == 0) {
		musb->irq_wake = 1;
		device_init_wakeup(dev, 1);
	} else {
		musb->irq_wake = 0;
	}

	/* program PHY to use external vBus if required */
	if (plat->extvbus) {
		u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
		busctl |= MUSB_ULPI_USE_EXTVBUS;
		musb_write_ulpi_buscontrol(musb->mregs, busctl);
	}

	if (musb->xceiv->otg->default_a) {
		MUSB_HST_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_A_IDLE;
	} else {
		MUSB_DEV_MODE(musb);
		musb->xceiv->otg->state = OTG_STATE_B_IDLE;
	}

	switch (musb->port_mode) {
	case MUSB_PORT_MODE_HOST:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_HOST);
		break;
	case MUSB_PORT_MODE_GADGET:
		status = musb_gadget_setup(musb);
		if (status < 0)
			goto fail3;
		status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
		break;
	case MUSB_PORT_MODE_DUAL_ROLE:
		status = musb_host_setup(musb, plat->power);
		if (status < 0)
			goto fail3;
		status = musb_gadget_setup(musb);
		if (status) {
			musb_host_cleanup(musb);
			goto fail3;
		}
		status = musb_platform_set_mode(musb, MUSB_OTG);
		break;
	default:
		dev_err(dev, "unsupported port mode %d\n", musb->port_mode);
		break;
	}

	/*
	 * NOTE(review): if musb_platform_set_mode() fails here after
	 * musb_host_setup()/musb_gadget_setup() succeeded, the fail3
	 * path below does not undo those setups - verify against
	 * mainline before relying on this error path.
	 */
	if (status < 0)
		goto fail3;

	status = musb_init_debugfs(musb);
	if (status < 0)
		goto fail4;

	status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
	if (status)
		goto fail5;

	/* allow runtime suspend from now on; balances the get_sync above */
	musb->is_initialized = 1;
	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);

	return 0;

fail5:
	musb_exit_debugfs(musb);

fail4:
	musb_gadget_cleanup(musb);
	musb_host_cleanup(musb);

fail3:
	cancel_delayed_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	if (musb->dma_controller)
		musb_dma_controller_destroy(musb->dma_controller);

fail2_5:
	usb_phy_shutdown(musb->xceiv);

err_usb_phy_init:
	pm_runtime_dont_use_autosuspend(musb->controller);
	pm_runtime_put_sync(musb->controller);
	pm_runtime_disable(musb->controller);

fail2:
	if (musb->irq_wake)
		device_init_wakeup(dev, 0);
	musb_platform_exit(musb);

fail1:
	/* -EPROBE_DEFER is routine, not an error worth logging */
	if (status != -EPROBE_DEFER)
		dev_err(musb->controller,
			"%s failed with status %d\n", __func__, status);

	musb_free(musb);

fail0:

	return status;

}
2463
2464 /*-------------------------------------------------------------------------*/
2465
2466 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2467 * bridge to a platform device; this driver then suffices.
2468 */
musb_probe(struct platform_device * pdev)2469 static int musb_probe(struct platform_device *pdev)
2470 {
2471 struct device *dev = &pdev->dev;
2472 int irq = platform_get_irq_byname(pdev, "mc");
2473 struct resource *iomem;
2474 void __iomem *base;
2475
2476 if (irq <= 0)
2477 return -ENODEV;
2478
2479 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2480 base = devm_ioremap_resource(dev, iomem);
2481 if (IS_ERR(base))
2482 return PTR_ERR(base);
2483
2484 return musb_init_controller(dev, irq, base);
2485 }
2486
static int musb_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;

	/* this gets called on rmmod.
	 * - Host mode: host may still be active
	 * - Peripheral mode: peripheral is deactivated (or never-activated)
	 * - OTG mode: both roles are deactivated (or never-activated)
	 */
	musb_exit_debugfs(musb);

	/* Stop deferred work before tearing down what it touches */
	cancel_delayed_work_sync(&musb->irq_work);
	cancel_delayed_work_sync(&musb->finish_resume_work);
	cancel_delayed_work_sync(&musb->deassert_reset_work);
	/* Keep the device powered for the register writes below */
	pm_runtime_get_sync(musb->controller);
	musb_host_cleanup(musb);
	musb_gadget_cleanup(musb);

	musb_platform_disable(musb);
	spin_lock_irqsave(&musb->lock, flags);
	musb_disable_interrupts(musb);
	/* clear the session bit so the controller goes idle */
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	spin_unlock_irqrestore(&musb->lock, flags);
	musb_platform_exit(musb);

	pm_runtime_dont_use_autosuspend(musb->controller);
	pm_runtime_put_sync(musb->controller);
	pm_runtime_disable(musb->controller);
	musb_phy_callback = NULL;
	if (musb->dma_controller)
		musb_dma_controller_destroy(musb->dma_controller);
	usb_phy_shutdown(musb->xceiv);
	/* releases the IRQ and sysfs group; musb itself is devm-managed */
	musb_free(musb);
	device_init_wakeup(dev, 0);
	return 0;
}
2525
2526 #ifdef CONFIG_PM
2527
/*
 * Save the controller's common and per-endpoint registers into
 * musb->context so they can be re-programmed after a (runtime)
 * suspend by musb_restore_context().
 */
static void musb_save_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;

	musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
	musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
	musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
	musb->context.power = musb_readb(musb_base, MUSB_POWER);
	musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
	musb->context.index = musb_readb(musb_base, MUSB_INDEX);
	musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep *hw_ep;

		/* NOTE(review): &array[i] is never NULL; check looks vestigial */
		hw_ep = &musb->endpoints[i];
		if (!hw_ep)
			continue;

		/* endpoints without mapped registers have nothing to save */
		epio = hw_ep->regs;
		if (!epio)
			continue;

		/* select endpoint i, then read its indexed registers */
		musb_writeb(musb_base, MUSB_INDEX, i);
		musb->context.index_regs[i].txmaxp =
			musb_readw(epio, MUSB_TXMAXP);
		musb->context.index_regs[i].txcsr =
			musb_readw(epio, MUSB_TXCSR);
		musb->context.index_regs[i].rxmaxp =
			musb_readw(epio, MUSB_RXMAXP);
		musb->context.index_regs[i].rxcsr =
			musb_readw(epio, MUSB_RXCSR);

		/* FIFO layout registers only exist with dynamic FIFO sizing */
		if (musb->dyn_fifo) {
			musb->context.index_regs[i].txfifoadd =
					musb_read_txfifoadd(musb_base);
			musb->context.index_regs[i].rxfifoadd =
					musb_read_rxfifoadd(musb_base);
			musb->context.index_regs[i].txfifosz =
					musb_read_txfifosz(musb_base);
			musb->context.index_regs[i].rxfifosz =
					musb_read_rxfifosz(musb_base);
		}

		musb->context.index_regs[i].txtype =
			musb_readb(epio, MUSB_TXTYPE);
		musb->context.index_regs[i].txinterval =
			musb_readb(epio, MUSB_TXINTERVAL);
		musb->context.index_regs[i].rxtype =
			musb_readb(epio, MUSB_RXTYPE);
		musb->context.index_regs[i].rxinterval =
			musb_readb(epio, MUSB_RXINTERVAL);

		musb->context.index_regs[i].txfunaddr =
			musb_read_txfunaddr(musb, i);
		musb->context.index_regs[i].txhubaddr =
			musb_read_txhubaddr(musb, i);
		musb->context.index_regs[i].txhubport =
			musb_read_txhubport(musb, i);

		musb->context.index_regs[i].rxfunaddr =
			musb_read_rxfunaddr(musb, i);
		musb->context.index_regs[i].rxhubaddr =
			musb_read_rxhubaddr(musb, i);
		musb->context.index_regs[i].rxhubport =
			musb_read_rxhubport(musb, i);
	}
}
2598
/*
 * Re-program the controller's common and per-endpoint registers from
 * musb->context, the mirror image of musb_save_context().
 */
static void musb_restore_context(struct musb *musb)
{
	int i;
	void __iomem *musb_base = musb->mregs;
	void __iomem *epio;
	u8 power;

	musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
	musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
	musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);

	/* Don't affect SUSPENDM/RESUME bits in POWER reg */
	power = musb_readb(musb_base, MUSB_POWER);
	power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
	musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
	power |= musb->context.power;
	musb_writeb(musb_base, MUSB_POWER, power);

	musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
	musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
	musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
	/* only restore DEVCTL when a session was active at save time */
	if (musb->context.devctl & MUSB_DEVCTL_SESSION)
		musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);

	for (i = 0; i < musb->config->num_eps; ++i) {
		struct musb_hw_ep *hw_ep;

		/* NOTE(review): &array[i] is never NULL; check looks vestigial */
		hw_ep = &musb->endpoints[i];
		if (!hw_ep)
			continue;

		/* endpoints without mapped registers have nothing to restore */
		epio = hw_ep->regs;
		if (!epio)
			continue;

		/* select endpoint i, then write its indexed registers */
		musb_writeb(musb_base, MUSB_INDEX, i);
		musb_writew(epio, MUSB_TXMAXP,
			musb->context.index_regs[i].txmaxp);
		musb_writew(epio, MUSB_TXCSR,
			musb->context.index_regs[i].txcsr);
		musb_writew(epio, MUSB_RXMAXP,
			musb->context.index_regs[i].rxmaxp);
		musb_writew(epio, MUSB_RXCSR,
			musb->context.index_regs[i].rxcsr);

		/* FIFO layout registers only exist with dynamic FIFO sizing */
		if (musb->dyn_fifo) {
			musb_write_txfifosz(musb_base,
				musb->context.index_regs[i].txfifosz);
			musb_write_rxfifosz(musb_base,
				musb->context.index_regs[i].rxfifosz);
			musb_write_txfifoadd(musb_base,
				musb->context.index_regs[i].txfifoadd);
			musb_write_rxfifoadd(musb_base,
				musb->context.index_regs[i].rxfifoadd);
		}

		musb_writeb(epio, MUSB_TXTYPE,
				musb->context.index_regs[i].txtype);
		musb_writeb(epio, MUSB_TXINTERVAL,
				musb->context.index_regs[i].txinterval);
		musb_writeb(epio, MUSB_RXTYPE,
				musb->context.index_regs[i].rxtype);
		musb_writeb(epio, MUSB_RXINTERVAL,
				musb->context.index_regs[i].rxinterval);
		musb_write_txfunaddr(musb, i,
				musb->context.index_regs[i].txfunaddr);
		musb_write_txhubaddr(musb, i,
				musb->context.index_regs[i].txhubaddr);
		musb_write_txhubport(musb, i,
				musb->context.index_regs[i].txhubport);

		musb_write_rxfunaddr(musb, i,
				musb->context.index_regs[i].rxfunaddr);
		musb_write_rxhubaddr(musb, i,
				musb->context.index_regs[i].rxhubaddr);
		musb_write_rxhubport(musb, i,
				musb->context.index_regs[i].rxhubport);
	}
	/* put the saved endpoint index back */
	musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
}
2680
/*
 * System-sleep suspend: quiesce the controller, flush deferred IRQ
 * work, and save the register context. The runtime PM reference taken
 * here is released by musb_resume().
 */
static int musb_suspend(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int ret;

	/* power up so the registers below can be accessed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	musb_platform_disable(musb);
	musb_disable_interrupts(musb);

	/*
	 * Drain irq_work completely, including any self-requeues;
	 * flush_irq_work is presumably read by the worker to alter its
	 * requeue behavior - TODO confirm against musb_irq_work users.
	 */
	musb->flush_irq_work = true;
	while (flush_delayed_work(&musb->irq_work))
		;
	musb->flush_irq_work = false;

	/* drop the session unless the glue layer asked to preserve it */
	if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
		musb_writeb(musb->mregs, MUSB_DEVCTL, 0);

	/* all queued resume work should have been run by now */
	WARN_ON(!list_empty(&musb->pending_list));

	spin_lock_irqsave(&musb->lock, flags);

	if (is_peripheral_active(musb)) {
		/* FIXME force disconnect unless we know USB will wake
		 * the system up quickly enough to respond ...
		 */
	} else if (is_host_active(musb)) {
		/* we know all the children are suspended; sometimes
		 * they will even be wakeup-enabled.
		 */
	}

	/* save registers so musb_resume() can restore them */
	musb_save_context(musb);

	spin_unlock_irqrestore(&musb->lock, flags);
	return 0;
}
2723
/*
 * System-sleep resume: restore the register context saved by
 * musb_suspend(), re-enable the controller, and run any work that was
 * queued while suspended.
 */
static int musb_resume(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int error;
	u8 devctl;
	u8 mask;

	/*
	 * For static cmos like DaVinci, register values were preserved
	 * unless for some reason the whole soc powered down or the USB
	 * module got reset through the PSC (vs just being disabled).
	 *
	 * For the DSPS glue layer though, a full register restore has to
	 * be done. As it shouldn't harm other platforms, we do it
	 * unconditionally.
	 */

	musb_restore_context(musb);

	/* if the device/speed bits changed across suspend, forget port state */
	devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
	mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
	if ((devctl & mask) != (musb->context.devctl & mask))
		musb->port1_status = 0;

	musb_enable_interrupts(musb);
	musb_platform_enable(musb);

	/* run callbacks deferred while the controller was suspended */
	spin_lock_irqsave(&musb->lock, flags);
	error = musb_run_resume_work(musb);
	if (error)
		dev_err(musb->controller, "resume work failed with %i\n",
			error);
	spin_unlock_irqrestore(&musb->lock, flags);

	/* balances the pm_runtime_get_sync() taken in musb_suspend() */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}
2764
/*
 * Runtime suspend: save the register context and flag the controller
 * as runtime-suspended so musb_queue_resume_work() queues callbacks
 * instead of running them directly.
 */
static int musb_runtime_suspend(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);

	musb_save_context(musb);
	musb->is_runtime_suspended = 1;

	return 0;
}
2774
/*
 * Runtime resume: restore the register context (once the driver is
 * fully initialized) and drain any work queued while suspended.
 */
static int musb_runtime_resume(struct device *dev)
{
	struct musb *musb = dev_to_musb(dev);
	unsigned long flags;
	int error;

	/*
	 * When pm_runtime_get_sync called for the first time in driver
	 * init,  some of the structure is still not initialized which is
	 * used in restore function. But clock needs to be
	 * enabled before any register access, so
	 * pm_runtime_get_sync has to be called.
	 * Also context restore without save does not make
	 * any sense
	 */
	if (!musb->is_initialized)
		return 0;

	musb_restore_context(musb);

	spin_lock_irqsave(&musb->lock, flags);
	error = musb_run_resume_work(musb);
	if (error)
		dev_err(musb->controller, "resume work failed with %i\n",
			error);
	/* clear under list_lock so musb_queue_resume_work() sees it atomically */
	musb->is_runtime_suspended = 0;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* callback failures were logged above; resume itself still succeeds */
	return 0;
}
2805
/* System-sleep and runtime PM callbacks (compiled only with CONFIG_PM) */
static const struct dev_pm_ops musb_dev_pm_ops = {
	.suspend	= musb_suspend,
	.resume		= musb_resume,
	.runtime_suspend = musb_runtime_suspend,
	.runtime_resume = musb_runtime_resume,
};
2812
2813 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2814 #else
2815 #define MUSB_DEV_PM_OPS NULL
2816 #endif
2817
/* Core platform driver; glue layers instantiate the matching device */
static struct platform_driver musb_driver = {
	.driver = {
		.name		= (char *)musb_driver_name,
		.bus		= &platform_bus_type,
		.pm		= MUSB_DEV_PM_OPS,
	},
	.probe		= musb_probe,
	.remove		= musb_remove,
};

module_platform_driver(musb_driver);
2829