1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Intel PXA25x and IXP4xx on-chip full speed USB device controllers
4 *
5 * Copyright (C) 2002 Intrinsyc, Inc. (Frank Becker)
6 * Copyright (C) 2003 Robert Schwebel, Pengutronix
7 * Copyright (C) 2003 Benedikt Spranger, Pengutronix
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003 Joshua Wise
10 */
11
12 /* #define VERBOSE_DEBUG */
13
14 #include <linux/device.h>
15 #include <linux/gpio.h>
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/ioport.h>
19 #include <linux/types.h>
20 #include <linux/errno.h>
21 #include <linux/err.h>
22 #include <linux/delay.h>
23 #include <linux/slab.h>
24 #include <linux/timer.h>
25 #include <linux/list.h>
26 #include <linux/interrupt.h>
27 #include <linux/mm.h>
28 #include <linux/platform_data/pxa2xx_udc.h>
29 #include <linux/platform_device.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/irq.h>
32 #include <linux/clk.h>
33 #include <linux/seq_file.h>
34 #include <linux/debugfs.h>
35 #include <linux/io.h>
36 #include <linux/prefetch.h>
37
38 #include <asm/byteorder.h>
39 #include <asm/dma.h>
40 #include <asm/mach-types.h>
41 #include <asm/unaligned.h>
42
43 #include <linux/usb/ch9.h>
44 #include <linux/usb/gadget.h>
45 #include <linux/usb/otg.h>
46
47 #ifdef CONFIG_ARCH_LUBBOCK
48 #include <mach/lubbock.h>
49 #endif
50
51 #define UDCCR 0x0000 /* UDC Control Register */
52 #define UDC_RES1 0x0004 /* UDC Undocumented - Reserved1 */
53 #define UDC_RES2 0x0008 /* UDC Undocumented - Reserved2 */
54 #define UDC_RES3 0x000C /* UDC Undocumented - Reserved3 */
55 #define UDCCS0 0x0010 /* UDC Endpoint 0 Control/Status Register */
56 #define UDCCS1 0x0014 /* UDC Endpoint 1 (IN) Control/Status Register */
57 #define UDCCS2 0x0018 /* UDC Endpoint 2 (OUT) Control/Status Register */
58 #define UDCCS3 0x001C /* UDC Endpoint 3 (IN) Control/Status Register */
59 #define UDCCS4 0x0020 /* UDC Endpoint 4 (OUT) Control/Status Register */
60 #define UDCCS5 0x0024 /* UDC Endpoint 5 (Interrupt) Control/Status Register */
61 #define UDCCS6 0x0028 /* UDC Endpoint 6 (IN) Control/Status Register */
62 #define UDCCS7 0x002C /* UDC Endpoint 7 (OUT) Control/Status Register */
63 #define UDCCS8 0x0030 /* UDC Endpoint 8 (IN) Control/Status Register */
64 #define UDCCS9 0x0034 /* UDC Endpoint 9 (OUT) Control/Status Register */
65 #define UDCCS10 0x0038 /* UDC Endpoint 10 (Interrupt) Control/Status Register */
66 #define UDCCS11 0x003C /* UDC Endpoint 11 (IN) Control/Status Register */
67 #define UDCCS12 0x0040 /* UDC Endpoint 12 (OUT) Control/Status Register */
68 #define UDCCS13 0x0044 /* UDC Endpoint 13 (IN) Control/Status Register */
69 #define UDCCS14 0x0048 /* UDC Endpoint 14 (OUT) Control/Status Register */
70 #define UDCCS15 0x004C /* UDC Endpoint 15 (Interrupt) Control/Status Register */
71 #define UFNRH 0x0060 /* UDC Frame Number Register High */
72 #define UFNRL 0x0064 /* UDC Frame Number Register Low */
73 #define UBCR2 0x0068 /* UDC Byte Count Reg 2 */
74 #define UBCR4 0x006c /* UDC Byte Count Reg 4 */
75 #define UBCR7 0x0070 /* UDC Byte Count Reg 7 */
76 #define UBCR9 0x0074 /* UDC Byte Count Reg 9 */
77 #define UBCR12 0x0078 /* UDC Byte Count Reg 12 */
78 #define UBCR14 0x007c /* UDC Byte Count Reg 14 */
79 #define UDDR0 0x0080 /* UDC Endpoint 0 Data Register */
80 #define UDDR1 0x0100 /* UDC Endpoint 1 Data Register */
81 #define UDDR2 0x0180 /* UDC Endpoint 2 Data Register */
82 #define UDDR3 0x0200 /* UDC Endpoint 3 Data Register */
83 #define UDDR4 0x0400 /* UDC Endpoint 4 Data Register */
84 #define UDDR5 0x00A0 /* UDC Endpoint 5 Data Register */
85 #define UDDR6 0x0600 /* UDC Endpoint 6 Data Register */
86 #define UDDR7 0x0680 /* UDC Endpoint 7 Data Register */
87 #define UDDR8 0x0700 /* UDC Endpoint 8 Data Register */
88 #define UDDR9 0x0900 /* UDC Endpoint 9 Data Register */
89 #define UDDR10 0x00C0 /* UDC Endpoint 10 Data Register */
90 #define UDDR11 0x0B00 /* UDC Endpoint 11 Data Register */
91 #define UDDR12 0x0B80 /* UDC Endpoint 12 Data Register */
92 #define UDDR13 0x0C00 /* UDC Endpoint 13 Data Register */
93 #define UDDR14 0x0E00 /* UDC Endpoint 14 Data Register */
94 #define UDDR15 0x00E0 /* UDC Endpoint 15 Data Register */
95
96 #define UICR0 0x0050 /* UDC Interrupt Control Register 0 */
97 #define UICR1 0x0054 /* UDC Interrupt Control Register 1 */
98
99 #define USIR0 0x0058 /* UDC Status Interrupt Register 0 */
100 #define USIR1 0x005C /* UDC Status Interrupt Register 1 */
101
102 #define UDCCR_UDE (1 << 0) /* UDC enable */
103 #define UDCCR_UDA (1 << 1) /* UDC active */
104 #define UDCCR_RSM (1 << 2) /* Device resume */
105 #define UDCCR_RESIR (1 << 3) /* Resume interrupt request */
106 #define UDCCR_SUSIR (1 << 4) /* Suspend interrupt request */
107 #define UDCCR_SRM (1 << 5) /* Suspend/resume interrupt mask */
108 #define UDCCR_RSTIR (1 << 6) /* Reset interrupt request */
109 #define UDCCR_REM (1 << 7) /* Reset interrupt mask */
110
111 #define UDCCS0_OPR (1 << 0) /* OUT packet ready */
112 #define UDCCS0_IPR (1 << 1) /* IN packet ready */
113 #define UDCCS0_FTF (1 << 2) /* Flush Tx FIFO */
114 #define UDCCS0_DRWF (1 << 3) /* Device remote wakeup feature */
115 #define UDCCS0_SST (1 << 4) /* Sent stall */
116 #define UDCCS0_FST (1 << 5) /* Force stall */
117 #define UDCCS0_RNE (1 << 6) /* Receive FIFO not empty */
118 #define UDCCS0_SA (1 << 7) /* Setup active */
119
120 #define UDCCS_BI_TFS (1 << 0) /* Transmit FIFO service */
121 #define UDCCS_BI_TPC (1 << 1) /* Transmit packet complete */
122 #define UDCCS_BI_FTF (1 << 2) /* Flush Tx FIFO */
123 #define UDCCS_BI_TUR (1 << 3) /* Transmit FIFO underrun */
124 #define UDCCS_BI_SST (1 << 4) /* Sent stall */
125 #define UDCCS_BI_FST (1 << 5) /* Force stall */
126 #define UDCCS_BI_TSP (1 << 7) /* Transmit short packet */
127
128 #define UDCCS_BO_RFS (1 << 0) /* Receive FIFO service */
129 #define UDCCS_BO_RPC (1 << 1) /* Receive packet complete */
130 #define UDCCS_BO_DME (1 << 3) /* DMA enable */
131 #define UDCCS_BO_SST (1 << 4) /* Sent stall */
132 #define UDCCS_BO_FST (1 << 5) /* Force stall */
133 #define UDCCS_BO_RNE (1 << 6) /* Receive FIFO not empty */
134 #define UDCCS_BO_RSP (1 << 7) /* Receive short packet */
135
136 #define UDCCS_II_TFS (1 << 0) /* Transmit FIFO service */
137 #define UDCCS_II_TPC (1 << 1) /* Transmit packet complete */
138 #define UDCCS_II_FTF (1 << 2) /* Flush Tx FIFO */
139 #define UDCCS_II_TUR (1 << 3) /* Transmit FIFO underrun */
140 #define UDCCS_II_TSP (1 << 7) /* Transmit short packet */
141
142 #define UDCCS_IO_RFS (1 << 0) /* Receive FIFO service */
143 #define UDCCS_IO_RPC (1 << 1) /* Receive packet complete */
144 #ifdef CONFIG_ARCH_IXP4XX /* FIXME: is this right? datasheet says '2' */
145 #define UDCCS_IO_ROF (1 << 3) /* Receive overflow */
146 #endif
147 #ifdef CONFIG_ARCH_PXA
148 #define UDCCS_IO_ROF (1 << 2) /* Receive overflow */
149 #endif
150 #define UDCCS_IO_DME (1 << 3) /* DMA enable */
151 #define UDCCS_IO_RNE (1 << 6) /* Receive FIFO not empty */
152 #define UDCCS_IO_RSP (1 << 7) /* Receive short packet */
153
154 #define UDCCS_INT_TFS (1 << 0) /* Transmit FIFO service */
155 #define UDCCS_INT_TPC (1 << 1) /* Transmit packet complete */
156 #define UDCCS_INT_FTF (1 << 2) /* Flush Tx FIFO */
157 #define UDCCS_INT_TUR (1 << 3) /* Transmit FIFO underrun */
158 #define UDCCS_INT_SST (1 << 4) /* Sent stall */
159 #define UDCCS_INT_FST (1 << 5) /* Force stall */
160 #define UDCCS_INT_TSP (1 << 7) /* Transmit short packet */
161
162 #define UICR0_IM0 (1 << 0) /* Interrupt mask ep 0 */
163 #define UICR0_IM1 (1 << 1) /* Interrupt mask ep 1 */
164 #define UICR0_IM2 (1 << 2) /* Interrupt mask ep 2 */
165 #define UICR0_IM3 (1 << 3) /* Interrupt mask ep 3 */
166 #define UICR0_IM4 (1 << 4) /* Interrupt mask ep 4 */
167 #define UICR0_IM5 (1 << 5) /* Interrupt mask ep 5 */
168 #define UICR0_IM6 (1 << 6) /* Interrupt mask ep 6 */
169 #define UICR0_IM7 (1 << 7) /* Interrupt mask ep 7 */
170
171 #define UICR1_IM8 (1 << 0) /* Interrupt mask ep 8 */
172 #define UICR1_IM9 (1 << 1) /* Interrupt mask ep 9 */
173 #define UICR1_IM10 (1 << 2) /* Interrupt mask ep 10 */
174 #define UICR1_IM11 (1 << 3) /* Interrupt mask ep 11 */
175 #define UICR1_IM12 (1 << 4) /* Interrupt mask ep 12 */
176 #define UICR1_IM13 (1 << 5) /* Interrupt mask ep 13 */
177 #define UICR1_IM14 (1 << 6) /* Interrupt mask ep 14 */
178 #define UICR1_IM15 (1 << 7) /* Interrupt mask ep 15 */
179
180 #define USIR0_IR0 (1 << 0) /* Interrupt request ep 0 */
181 #define USIR0_IR1 (1 << 1) /* Interrupt request ep 1 */
182 #define USIR0_IR2 (1 << 2) /* Interrupt request ep 2 */
183 #define USIR0_IR3 (1 << 3) /* Interrupt request ep 3 */
184 #define USIR0_IR4 (1 << 4) /* Interrupt request ep 4 */
185 #define USIR0_IR5 (1 << 5) /* Interrupt request ep 5 */
186 #define USIR0_IR6 (1 << 6) /* Interrupt request ep 6 */
187 #define USIR0_IR7 (1 << 7) /* Interrupt request ep 7 */
188
189 #define USIR1_IR8 (1 << 0) /* Interrupt request ep 8 */
190 #define USIR1_IR9 (1 << 1) /* Interrupt request ep 9 */
191 #define USIR1_IR10 (1 << 2) /* Interrupt request ep 10 */
192 #define USIR1_IR11 (1 << 3) /* Interrupt request ep 11 */
193 #define USIR1_IR12 (1 << 4) /* Interrupt request ep 12 */
194 #define USIR1_IR13 (1 << 5) /* Interrupt request ep 13 */
195 #define USIR1_IR14 (1 << 6) /* Interrupt request ep 14 */
196 #define USIR1_IR15 (1 << 7) /* Interrupt request ep 15 */
197
198 /*
199 * This driver handles the USB Device Controller (UDC) in Intel's PXA 25x
200 * series processors. The UDC for the IXP 4xx series is very similar.
201 * There are fifteen endpoints, in addition to ep0.
202 *
203 * Such controller drivers work with a gadget driver. The gadget driver
204 * returns descriptors, implements configuration and data protocols used
205 * by the host to interact with this device, and allocates endpoints to
206 * the different protocol interfaces. The controller driver virtualizes
207 * usb hardware so that the gadget drivers will be more portable.
208 *
209 * This UDC hardware wants to implement a bit too much USB protocol, so
210 * it constrains the sorts of USB configuration change events that work.
211 * The errata for these chips are misleading; some "fixed" bugs from
212 * pxa250 a0/a1 b0/b1/b2 sure act like they're still there.
213 *
214 * Note that the UDC hardware supports DMA (except on IXP) but that's
215 * not used here. IN-DMA (to host) is simple enough, when the data is
216 * suitably aligned (16 bytes) ... the network stack doesn't do that,
217 * other software can. OUT-DMA is buggy in most chip versions, as well
218 * as poorly designed (data toggle not automatic). So this driver won't
219 * bother using DMA. (Mostly-working IN-DMA support was available in
220 * kernels before 2.6.23, but was never enabled or well tested.)
221 */
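/*
 * Purely illustrative sketch of the gadget-driver side of that split (not
 * part of this controller driver): a function driver picks one of the
 * fixed endpoints this UDC exposes, matches it against its own descriptor,
 * and enables it.  "bulk_in_desc" is an assumed name for the example.
 *
 *	struct usb_ep *ep;
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	if (ep) {
 *		ep->desc = &bulk_in_desc;
 *		usb_ep_enable(ep);	// ends up in pxa25x_ep_enable()
 *	}
 */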
222
223 #define DRIVER_VERSION "30-June-2007"
224 #define DRIVER_DESC "PXA 25x USB Device Controller driver"
225
226
227 static const char driver_name [] = "pxa25x_udc";
228
229 static const char ep0name [] = "ep0";
230
231
232 #ifdef CONFIG_ARCH_IXP4XX
233
234 /* cpu-specific register addresses are compiled in to this code */
235 #ifdef CONFIG_ARCH_PXA
236 #error "Can't configure both IXP and PXA"
237 #endif
238
239 /* IXP doesn't yet support <linux/clk.h> */
240 #define clk_get(dev,name) NULL
241 #define clk_enable(clk) do { } while (0)
242 #define clk_disable(clk) do { } while (0)
243 #define clk_put(clk) do { } while (0)
244
245 #endif
246
247 #include "pxa25x_udc.h"
248
249
250 #ifdef CONFIG_USB_PXA25X_SMALL
251 #define SIZE_STR " (small)"
252 #else
253 #define SIZE_STR ""
254 #endif
255
256 /* ---------------------------------------------------------------------------
257 * endpoint related parts of the api to the usb controller hardware,
258 * used by gadget driver; and the inner talker-to-hardware core.
259 * ---------------------------------------------------------------------------
260 */
261
262 static void pxa25x_ep_fifo_flush (struct usb_ep *ep);
263 static void nuke (struct pxa25x_ep *, int status);
264
265 /* one GPIO should control a D+ pullup, so host sees this device (or not) */
266 static void pullup_off(void)
267 {
268 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
269 int off_level = mach->gpio_pullup_inverted;
270
271 if (gpio_is_valid(mach->gpio_pullup))
272 gpio_set_value(mach->gpio_pullup, off_level);
273 else if (mach->udc_command)
274 mach->udc_command(PXA2XX_UDC_CMD_DISCONNECT);
275 }
276
277 static void pullup_on(void)
278 {
279 struct pxa2xx_udc_mach_info *mach = the_controller->mach;
280 int on_level = !mach->gpio_pullup_inverted;
281
282 if (gpio_is_valid(mach->gpio_pullup))
283 gpio_set_value(mach->gpio_pullup, on_level);
284 else if (mach->udc_command)
285 mach->udc_command(PXA2XX_UDC_CMD_CONNECT);
286 }
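/*
 * Minimal sketch of how board code feeds the two pullup hooks above via
 * platform data (struct pxa2xx_udc_mach_info comes from
 * <linux/platform_data/pxa2xx_udc.h>); the GPIO number is an assumption
 * for the example, not taken from any real board file:
 *
 *	static struct pxa2xx_udc_mach_info example_udc_info = {
 *		.gpio_pullup		= 36,	// assumed board-specific GPIO
 *		.gpio_pullup_inverted	= 1,	// pullup driven active-low
 *	};
 *
 *	pxa_set_udc_info(&example_udc_info);
 *
 * Boards without such a GPIO can instead supply a .udc_command() callback
 * that handles PXA2XX_UDC_CMD_CONNECT and PXA2XX_UDC_CMD_DISCONNECT.
 */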
287
288 #if defined(CONFIG_CPU_BIG_ENDIAN)
289 /*
290 * IXP4xx has its buses wired up in a way that relies on never doing any
291 * byte swaps, independent of whether it runs in big-endian or little-endian
292 * mode, as explained by Krzysztof Hałasa.
293 *
294 * We only support pxa25x in little-endian mode, but it is very likely
295 * that it works the same way.
296 */
297 static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
298 {
299 iowrite32be(val, dev->regs + reg);
300 }
301
302 static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
303 {
304 return ioread32be(dev->regs + reg);
305 }
306 #else
307 static inline void udc_set_reg(struct pxa25x_udc *dev, u32 reg, u32 val)
308 {
309 writel(val, dev->regs + reg);
310 }
311
312 static inline u32 udc_get_reg(struct pxa25x_udc *dev, u32 reg)
313 {
314 return readl(dev->regs + reg);
315 }
316 #endif
317
318 static void pio_irq_enable(struct pxa25x_ep *ep)
319 {
320 u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
321
322 if (bEndpointAddress < 8)
323 udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) &
324 ~(1 << bEndpointAddress));
325 else {
326 bEndpointAddress -= 8;
327 udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) &
328 ~(1 << bEndpointAddress));
329 }
330 }
331
332 static void pio_irq_disable(struct pxa25x_ep *ep)
333 {
334 u32 bEndpointAddress = ep->bEndpointAddress & 0xf;
335
336 if (bEndpointAddress < 8)
337 udc_set_reg(ep->dev, UICR0, udc_get_reg(ep->dev, UICR0) |
338 (1 << bEndpointAddress));
339 else {
340 bEndpointAddress -= 8;
341 udc_set_reg(ep->dev, UICR1, udc_get_reg(ep->dev, UICR1) |
342 (1 << bEndpointAddress));
343 }
344 }
345
346 /* The UDCCR reg contains mask and interrupt status bits,
347 * so using '|=' isn't safe as it may ack an interrupt.
348 */
349 #define UDCCR_MASK_BITS (UDCCR_REM | UDCCR_SRM | UDCCR_UDE)
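/* Concretely, a plain read-modify-write such as
 *
 *	udc_set_reg(dev, UDCCR, udc_get_reg(dev, UDCCR) | UDCCR_UDE);
 *
 * would write back any RESIR/SUSIR/RSTIR bits that happened to be set,
 * and since those are write-one-to-clear this silently acks a pending
 * interrupt.  The helpers below therefore mask the value down to
 * UDCCR_MASK_BITS before writing.
 */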
350
351 static inline void udc_set_mask_UDCCR(struct pxa25x_udc *dev, int mask)
352 {
353 u32 udccr = udc_get_reg(dev, UDCCR);
354
355 udc_set_reg(dev, UDCCR, (udccr & UDCCR_MASK_BITS) | (mask & UDCCR_MASK_BITS));
356 }
357
358 static inline void udc_clear_mask_UDCCR(struct pxa25x_udc *dev, int mask)
359 {
360 u32 udccr = udc_get_reg(dev, UDCCR);
361
362 udc_set_reg(dev, UDCCR, (udccr & UDCCR_MASK_BITS) & ~(mask & UDCCR_MASK_BITS));
363 }
364
365 static inline void udc_ack_int_UDCCR(struct pxa25x_udc *dev, int mask)
366 {
367 /* udccr contains the bits we don't want to change */
368 u32 udccr = udc_get_reg(dev, UDCCR) & UDCCR_MASK_BITS;
369
370 udc_set_reg(dev, UDCCR, udccr | (mask & ~UDCCR_MASK_BITS));
371 }
372
373 static inline u32 udc_ep_get_UDCCS(struct pxa25x_ep *ep)
374 {
375 return udc_get_reg(ep->dev, ep->regoff_udccs);
376 }
377
378 static inline void udc_ep_set_UDCCS(struct pxa25x_ep *ep, u32 data)
379 {
380 udc_set_reg(ep->dev, ep->regoff_udccs, data);
381 }
382
383 static inline u32 udc_ep0_get_UDCCS(struct pxa25x_udc *dev)
384 {
385 return udc_get_reg(dev, UDCCS0);
386 }
387
388 static inline void udc_ep0_set_UDCCS(struct pxa25x_udc *dev, u32 data)
389 {
390 udc_set_reg(dev, UDCCS0, data);
391 }
392
393 static inline u32 udc_ep_get_UDDR(struct pxa25x_ep *ep)
394 {
395 return udc_get_reg(ep->dev, ep->regoff_uddr);
396 }
397
398 static inline void udc_ep_set_UDDR(struct pxa25x_ep *ep, u32 data)
399 {
400 udc_set_reg(ep->dev, ep->regoff_uddr, data);
401 }
402
403 static inline u32 udc_ep_get_UBCR(struct pxa25x_ep *ep)
404 {
405 return udc_get_reg(ep->dev, ep->regoff_ubcr);
406 }
407
408 /*
409 * endpoint enable/disable
410 *
411 * we need to verify the descriptors used to enable endpoints. since pxa25x
412 * endpoint configurations are fixed, and are pretty much always enabled,
413 * there's not a lot to manage here.
414 *
415 * because pxa25x can't selectively initialize bulk (or interrupt) endpoints,
416 * (resetting endpoint halt and toggle), SET_INTERFACE is unusable except
417 * for a single interface (with only the default altsetting) and for gadget
418 * drivers that don't halt endpoints (not reset by set_interface). that also
419 * means that if you use ISO, you must violate the USB spec rule that all
420 * iso endpoints must be in non-default altsettings.
421 */
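/* For illustration, a descriptor that passes the checks below for one of
 * the fixed IN bulk endpoints; the address and wMaxPacketSize must match
 * the hardware's fixed configuration (BULK_FIFO_SIZE, normally 64 bytes
 * at full speed):
 *
 *	static struct usb_endpoint_descriptor example_bulk_in_desc = {
 *		.bLength		= USB_DT_ENDPOINT_SIZE,
 *		.bDescriptorType	= USB_DT_ENDPOINT,
 *		.bEndpointAddress	= USB_DIR_IN | 1,
 *		.bmAttributes		= USB_ENDPOINT_XFER_BULK,
 *		.wMaxPacketSize		= cpu_to_le16(64),
 *	};
 */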
422 static int pxa25x_ep_enable (struct usb_ep *_ep,
423 const struct usb_endpoint_descriptor *desc)
424 {
425 struct pxa25x_ep *ep;
426 struct pxa25x_udc *dev;
427
428 ep = container_of (_ep, struct pxa25x_ep, ep);
429 if (!_ep || !desc || _ep->name == ep0name
430 || desc->bDescriptorType != USB_DT_ENDPOINT
431 || ep->bEndpointAddress != desc->bEndpointAddress
432 || ep->fifo_size < usb_endpoint_maxp (desc)) {
433 DMSG("%s, bad ep or descriptor\n", __func__);
434 return -EINVAL;
435 }
436
437 /* xfer types must match, except that interrupt ~= bulk */
438 if (ep->bmAttributes != desc->bmAttributes
439 && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
440 && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {
441 DMSG("%s, %s type mismatch\n", __func__, _ep->name);
442 return -EINVAL;
443 }
444
445 /* hardware _could_ do smaller, but driver doesn't */
446 if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
447 && usb_endpoint_maxp (desc)
448 != BULK_FIFO_SIZE)
449 || !desc->wMaxPacketSize) {
450 DMSG("%s, bad %s maxpacket\n", __func__, _ep->name);
451 return -ERANGE;
452 }
453
454 dev = ep->dev;
455 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
456 DMSG("%s, bogus device state\n", __func__);
457 return -ESHUTDOWN;
458 }
459
460 ep->ep.desc = desc;
461 ep->stopped = 0;
462 ep->pio_irqs = 0;
463 ep->ep.maxpacket = usb_endpoint_maxp (desc);
464
465 /* flush fifo (mostly for OUT buffers) */
466 pxa25x_ep_fifo_flush (_ep);
467
468 /* ... reset halt state too, if we could ... */
469
470 DBG(DBG_VERBOSE, "enabled %s\n", _ep->name);
471 return 0;
472 }
473
474 static int pxa25x_ep_disable (struct usb_ep *_ep)
475 {
476 struct pxa25x_ep *ep;
477 unsigned long flags;
478
479 ep = container_of (_ep, struct pxa25x_ep, ep);
480 if (!_ep || !ep->ep.desc) {
481 DMSG("%s, %s not enabled\n", __func__,
482 _ep ? ep->ep.name : NULL);
483 return -EINVAL;
484 }
485 local_irq_save(flags);
486
487 nuke (ep, -ESHUTDOWN);
488
489 /* flush fifo (mostly for IN buffers) */
490 pxa25x_ep_fifo_flush (_ep);
491
492 ep->ep.desc = NULL;
493 ep->stopped = 1;
494
495 local_irq_restore(flags);
496 DBG(DBG_VERBOSE, "%s disabled\n", _ep->name);
497 return 0;
498 }
499
500 /*-------------------------------------------------------------------------*/
501
502 /* for the pxa25x, these can just wrap kmalloc/kfree. gadget drivers
503 * must still pass correctly initialized endpoints, since other controller
504 * drivers may care about how it's currently set up (dma issues etc).
505 */
506
507 /*
508 * pxa25x_ep_alloc_request - allocate a request data structure
509 */
510 static struct usb_request *
511 pxa25x_ep_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
512 {
513 struct pxa25x_request *req;
514
515 req = kzalloc(sizeof(*req), gfp_flags);
516 if (!req)
517 return NULL;
518
519 INIT_LIST_HEAD (&req->queue);
520 return &req->req;
521 }
522
523
524 /*
525 * pxa25x_ep_free_request - deallocate a request data structure
526 */
527 static void
528 pxa25x_ep_free_request (struct usb_ep *_ep, struct usb_request *_req)
529 {
530 struct pxa25x_request *req;
531
532 req = container_of (_req, struct pxa25x_request, req);
533 WARN_ON(!list_empty (&req->queue));
534 kfree(req);
535 }
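/* Request lifecycle sketch from a gadget driver's point of view (the
 * completion handler name is made up); pxa25x_ep_queue() below rejects
 * requests that lack buf or complete:
 *
 *	req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 *	...
 *	usb_ep_free_request(ep, req);	// once it is no longer needed
 */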
536
537 /*-------------------------------------------------------------------------*/
538
539 /*
540 * done - retire a request; caller blocked irqs
541 */
542 static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status)
543 {
544 unsigned stopped = ep->stopped;
545
546 list_del_init(&req->queue);
547
548 if (likely (req->req.status == -EINPROGRESS))
549 req->req.status = status;
550 else
551 status = req->req.status;
552
553 if (status && status != -ESHUTDOWN)
554 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n",
555 ep->ep.name, &req->req, status,
556 req->req.actual, req->req.length);
557
558 /* don't modify queue heads during completion callback */
559 ep->stopped = 1;
560 usb_gadget_giveback_request(&ep->ep, &req->req);
561 ep->stopped = stopped;
562 }
563
564
565 static inline void ep0_idle (struct pxa25x_udc *dev)
566 {
567 dev->ep0state = EP0_IDLE;
568 }
569
570 static int
571 write_packet(struct pxa25x_ep *ep, struct pxa25x_request *req, unsigned max)
572 {
573 u8 *buf;
574 unsigned length, count;
575
576 buf = req->req.buf + req->req.actual;
577 prefetch(buf);
578
579 /* how big will this packet be? */
580 length = min(req->req.length - req->req.actual, max);
581 req->req.actual += length;
582
583 count = length;
584 while (likely(count--))
585 udc_ep_set_UDDR(ep, *buf++);
586
587 return length;
588 }
589
590 /*
591 * write to an IN endpoint fifo, as many packets as possible.
592 * irqs will use this to write the rest later.
593 * caller guarantees at least one packet buffer is ready (or a zlp).
594 */
595 static int
596 write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
597 {
598 unsigned max;
599
600 max = usb_endpoint_maxp(ep->ep.desc);
601 do {
602 unsigned count;
603 int is_last, is_short;
604
605 count = write_packet(ep, req, max);
606
607 /* last packet is usually short (or a zlp) */
608 if (unlikely (count != max))
609 is_last = is_short = 1;
610 else {
611 if (likely(req->req.length != req->req.actual)
612 || req->req.zero)
613 is_last = 0;
614 else
615 is_last = 1;
616 /* interrupt/iso maxpacket may not fill the fifo */
617 is_short = unlikely (max < ep->fifo_size);
618 }
619
620 DBG(DBG_VERY_NOISY, "wrote %s %d bytes%s%s %d left %p\n",
621 ep->ep.name, count,
622 is_last ? "/L" : "", is_short ? "/S" : "",
623 req->req.length - req->req.actual, req);
624
625 /* let loose that packet. maybe try writing another one,
626 * double buffering might work. TSP, TPC, and TFS
627 * bit values are the same for all normal IN endpoints.
628 */
629 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC);
630 if (is_short)
631 udc_ep_set_UDCCS(ep, UDCCS_BI_TSP);
632
633 /* requests complete when all IN data is in the FIFO */
634 if (is_last) {
635 done (ep, req, 0);
636 if (list_empty(&ep->queue))
637 pio_irq_disable(ep);
638 return 1;
639 }
640
641 // TODO experiment: how robust can fifo mode tweaking be?
642 // double buffering is off in the default fifo mode, which
643 // prevents TFS from being set here.
644
645 } while (udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS);
646 return 0;
647 }
648
649 /* caller asserts req->pending (ep0 irq status nyet cleared); starts
650 * ep0 data stage. these chips want very simple state transitions.
651 */
652 static inline
653 void ep0start(struct pxa25x_udc *dev, u32 flags, const char *tag)
654 {
655 udc_ep0_set_UDCCS(dev, flags|UDCCS0_SA|UDCCS0_OPR);
656 udc_set_reg(dev, USIR0, USIR0_IR0);
657 dev->req_pending = 0;
658 DBG(DBG_VERY_NOISY, "%s %s, %02x/%02x\n",
659 __func__, tag, udc_ep0_get_UDCCS(dev), flags);
660 }
661
662 static int
663 write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
664 {
665 struct pxa25x_udc *dev = ep->dev;
666 unsigned count;
667 int is_short;
668
669 count = write_packet(&dev->ep[0], req, EP0_FIFO_SIZE);
670 ep->dev->stats.write.bytes += count;
671
672 /* last packet "must be" short (or a zlp) */
673 is_short = (count != EP0_FIFO_SIZE);
674
675 DBG(DBG_VERY_NOISY, "ep0in %d bytes %d left %p\n", count,
676 req->req.length - req->req.actual, req);
677
678 if (unlikely (is_short)) {
679 if (ep->dev->req_pending)
680 ep0start(ep->dev, UDCCS0_IPR, "short IN");
681 else
682 udc_ep0_set_UDCCS(dev, UDCCS0_IPR);
683
684 count = req->req.length;
685 done (ep, req, 0);
686 ep0_idle(ep->dev);
687 #ifndef CONFIG_ARCH_IXP4XX
688 #if 1
689 /* This seems to get rid of lost status irqs in some cases:
690 * host responds quickly, or next request involves config
691 * change automagic, or should have been hidden, or ...
692 *
693 * FIXME get rid of all udelays possible...
694 */
695 if (count >= EP0_FIFO_SIZE) {
696 count = 100;
697 do {
698 if ((udc_ep0_get_UDCCS(dev) & UDCCS0_OPR) != 0) {
699 /* clear OPR, generate ack */
700 udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
701 break;
702 }
703 count--;
704 udelay(1);
705 } while (count);
706 }
707 #endif
708 #endif
709 } else if (ep->dev->req_pending)
710 ep0start(ep->dev, 0, "IN");
711 return is_short;
712 }
713
714
715 /*
716 * read_fifo - unload packet(s) from the fifo we use for usb OUT
717 * transfers and put them into the request. caller should have made
718 * sure there's at least one packet ready.
719 *
720 * returns true if the request completed because of short packet or the
721 * request buffer having filled (and maybe overran till end-of-packet).
722 */
723 static int
724 read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
725 {
726 for (;;) {
727 u32 udccs;
728 u8 *buf;
729 unsigned bufferspace, count, is_short;
730
731 /* make sure there's a packet in the FIFO.
732 * UDCCS_{BO,IO}_RPC are all the same bit value.
733 * UDCCS_{BO,IO}_RNE are all the same bit value.
734 */
735 udccs = udc_ep_get_UDCCS(ep);
736 if (unlikely ((udccs & UDCCS_BO_RPC) == 0))
737 break;
738 buf = req->req.buf + req->req.actual;
739 prefetchw(buf);
740 bufferspace = req->req.length - req->req.actual;
741
742 /* read all bytes from this packet */
743 if (likely (udccs & UDCCS_BO_RNE)) {
744 count = 1 + (0x0ff & udc_ep_get_UBCR(ep));
745 req->req.actual += min (count, bufferspace);
746 } else /* zlp */
747 count = 0;
748 is_short = (count < ep->ep.maxpacket);
749 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n",
750 ep->ep.name, udccs, count,
751 is_short ? "/S" : "",
752 req, req->req.actual, req->req.length);
753 while (likely (count-- != 0)) {
754 u8 byte = (u8) udc_ep_get_UDDR(ep);
755
756 if (unlikely (bufferspace == 0)) {
757 /* this happens when the driver's buffer
758 * is smaller than what the host sent.
759 * discard the extra data.
760 */
761 if (req->req.status != -EOVERFLOW)
762 DMSG("%s overflow %d\n",
763 ep->ep.name, count);
764 req->req.status = -EOVERFLOW;
765 } else {
766 *buf++ = byte;
767 bufferspace--;
768 }
769 }
770 udc_ep_set_UDCCS(ep, UDCCS_BO_RPC);
771 /* RPC/RSP/RNE could now reflect the other packet buffer */
772
773 /* iso is one request per packet */
774 if (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
775 if (udccs & UDCCS_IO_ROF)
776 req->req.status = -EHOSTUNREACH;
777 /* more like "is_done" */
778 is_short = 1;
779 }
780
781 /* completion */
782 if (is_short || req->req.actual == req->req.length) {
783 done (ep, req, 0);
784 if (list_empty(&ep->queue))
785 pio_irq_disable(ep);
786 return 1;
787 }
788
789 /* finished that packet. the next one may be waiting... */
790 }
791 return 0;
792 }
793
794 /*
795 * special ep0 version of the above. no UBCR0 or double buffering; status
796 * handshaking is magic. most device protocols don't need control-OUT.
797 * CDC vendor commands (and RNDIS), mass storage CB/CBI, and some other
798 * protocols do use them.
799 */
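/* Gadget-driver sketch of accepting such a control-OUT data stage from its
 * setup() callback (ep0_req and my_ctrl_out_complete are assumed names for
 * a request the driver pre-allocated on gadget->ep0):
 *
 *	if (!(ctrl->bRequestType & USB_DIR_IN) && le16_to_cpu(ctrl->wLength)) {
 *		ep0_req->length = le16_to_cpu(ctrl->wLength);
 *		ep0_req->complete = my_ctrl_out_complete;
 *		return usb_ep_queue(gadget->ep0, ep0_req, GFP_ATOMIC);
 *	}
 */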
800 static int
801 read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req)
802 {
803 u8 *buf, byte;
804 unsigned bufferspace;
805
806 buf = req->req.buf + req->req.actual;
807 bufferspace = req->req.length - req->req.actual;
808
809 while (udc_ep_get_UDCCS(ep) & UDCCS0_RNE) {
810 byte = (u8) udc_get_reg(ep->dev, UDDR0);
811
812 if (unlikely (bufferspace == 0)) {
813 /* this happens when the driver's buffer
814 * is smaller than what the host sent.
815 * discard the extra data.
816 */
817 if (req->req.status != -EOVERFLOW)
818 DMSG("%s overflow\n", ep->ep.name);
819 req->req.status = -EOVERFLOW;
820 } else {
821 *buf++ = byte;
822 req->req.actual++;
823 bufferspace--;
824 }
825 }
826
827 udc_ep_set_UDCCS(ep, UDCCS0_OPR | UDCCS0_IPR);
828
829 /* completion */
830 if (req->req.actual >= req->req.length)
831 return 1;
832
833 /* finished that packet. the next one may be waiting... */
834 return 0;
835 }
836
837 /*-------------------------------------------------------------------------*/
838
839 static int
840 pxa25x_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
841 {
842 struct pxa25x_request *req;
843 struct pxa25x_ep *ep;
844 struct pxa25x_udc *dev;
845 unsigned long flags;
846
847 req = container_of(_req, struct pxa25x_request, req);
848 if (unlikely (!_req || !_req->complete || !_req->buf
849 || !list_empty(&req->queue))) {
850 DMSG("%s, bad params\n", __func__);
851 return -EINVAL;
852 }
853
854 ep = container_of(_ep, struct pxa25x_ep, ep);
855 if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
856 DMSG("%s, bad ep\n", __func__);
857 return -EINVAL;
858 }
859
860 dev = ep->dev;
861 if (unlikely (!dev->driver
862 || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
863 DMSG("%s, bogus device state\n", __func__);
864 return -ESHUTDOWN;
865 }
866
867 /* iso is always one packet per request, that's the only way
868 * we can report per-packet status. that also helps with dma.
869 */
870 if (unlikely (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
871 && req->req.length > usb_endpoint_maxp(ep->ep.desc)))
872 return -EMSGSIZE;
873
874 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n",
875 _ep->name, _req, _req->length, _req->buf);
876
877 local_irq_save(flags);
878
879 _req->status = -EINPROGRESS;
880 _req->actual = 0;
881
882 /* kickstart this i/o queue? */
883 if (list_empty(&ep->queue) && !ep->stopped) {
884 if (ep->ep.desc == NULL /* ep0 */) {
885 unsigned length = _req->length;
886
887 switch (dev->ep0state) {
888 case EP0_IN_DATA_PHASE:
889 dev->stats.write.ops++;
890 if (write_ep0_fifo(ep, req))
891 req = NULL;
892 break;
893
894 case EP0_OUT_DATA_PHASE:
895 dev->stats.read.ops++;
896 /* messy ... */
897 if (dev->req_config) {
898 DBG(DBG_VERBOSE, "ep0 config ack%s\n",
899 dev->has_cfr ? "" : " raced");
900 if (dev->has_cfr)
901 udc_set_reg(dev, UDCCFR, UDCCFR_AREN |
902 UDCCFR_ACM | UDCCFR_MB1);
903 done(ep, req, 0);
904 dev->ep0state = EP0_END_XFER;
905 local_irq_restore (flags);
906 return 0;
907 }
908 if (dev->req_pending)
909 ep0start(dev, UDCCS0_IPR, "OUT");
910 if (length == 0 || ((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0
911 && read_ep0_fifo(ep, req))) {
912 ep0_idle(dev);
913 done(ep, req, 0);
914 req = NULL;
915 }
916 break;
917
918 default:
919 DMSG("ep0 i/o, odd state %d\n", dev->ep0state);
920 local_irq_restore (flags);
921 return -EL2HLT;
922 }
923 /* can the FIFO satisfy the request immediately? */
924 } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
925 if ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) != 0
926 && write_fifo(ep, req))
927 req = NULL;
928 } else if ((udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) != 0
929 && read_fifo(ep, req)) {
930 req = NULL;
931 }
932
933 if (likely(req && ep->ep.desc))
934 pio_irq_enable(ep);
935 }
936
937 /* pio or dma irq handler advances the queue. */
938 if (likely(req != NULL))
939 list_add_tail(&req->queue, &ep->queue);
940 local_irq_restore(flags);
941
942 return 0;
943 }
944
945
946 /*
947 * nuke - dequeue ALL requests
948 */
949 static void nuke(struct pxa25x_ep *ep, int status)
950 {
951 struct pxa25x_request *req;
952
953 /* called with irqs blocked */
954 while (!list_empty(&ep->queue)) {
955 req = list_entry(ep->queue.next,
956 struct pxa25x_request,
957 queue);
958 done(ep, req, status);
959 }
960 if (ep->ep.desc)
961 pio_irq_disable(ep);
962 }
963
964
965 /* dequeue JUST ONE request */
966 static int pxa25x_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
967 {
968 struct pxa25x_ep *ep;
969 struct pxa25x_request *req;
970 unsigned long flags;
971
972 ep = container_of(_ep, struct pxa25x_ep, ep);
973 if (!_ep || ep->ep.name == ep0name)
974 return -EINVAL;
975
976 local_irq_save(flags);
977
978 /* make sure it's actually queued on this endpoint */
979 list_for_each_entry (req, &ep->queue, queue) {
980 if (&req->req == _req)
981 break;
982 }
983 if (&req->req != _req) {
984 local_irq_restore(flags);
985 return -EINVAL;
986 }
987
988 done(ep, req, -ECONNRESET);
989
990 local_irq_restore(flags);
991 return 0;
992 }
993
994 /*-------------------------------------------------------------------------*/
995
996 static int pxa25x_ep_set_halt(struct usb_ep *_ep, int value)
997 {
998 struct pxa25x_ep *ep;
999 unsigned long flags;
1000
1001 ep = container_of(_ep, struct pxa25x_ep, ep);
1002 if (unlikely (!_ep
1003 || (!ep->ep.desc && ep->ep.name != ep0name))
1004 || ep->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
1005 DMSG("%s, bad ep\n", __func__);
1006 return -EINVAL;
1007 }
1008 if (value == 0) {
1009 /* this path (reset toggle+halt) is needed to implement
1010 * SET_INTERFACE on normal hardware. but it can't be
1011 * done from software on the PXA UDC, and the hardware
1012 * forgets to do it as part of SET_INTERFACE automagic.
1013 */
1014 DMSG("only host can clear %s halt\n", _ep->name);
1015 return -EROFS;
1016 }
1017
1018 local_irq_save(flags);
1019
1020 if ((ep->bEndpointAddress & USB_DIR_IN) != 0
1021 && ((udc_ep_get_UDCCS(ep) & UDCCS_BI_TFS) == 0
1022 || !list_empty(&ep->queue))) {
1023 local_irq_restore(flags);
1024 return -EAGAIN;
1025 }
1026
1027 /* FST bit is the same for control, bulk in, bulk out, interrupt in */
1028 udc_ep_set_UDCCS(ep, UDCCS_BI_FST|UDCCS_BI_FTF);
1029
1030 /* ep0 needs special care */
1031 if (!ep->ep.desc) {
1032 start_watchdog(ep->dev);
1033 ep->dev->req_pending = 0;
1034 ep->dev->ep0state = EP0_STALL;
1035
1036 /* and bulk/intr endpoints like dropping stalls too */
1037 } else {
1038 unsigned i;
1039 for (i = 0; i < 1000; i += 20) {
1040 if (udc_ep_get_UDCCS(ep) & UDCCS_BI_SST)
1041 break;
1042 udelay(20);
1043 }
1044 }
1045 local_irq_restore(flags);
1046
1047 DBG(DBG_VERBOSE, "%s halt\n", _ep->name);
1048 return 0;
1049 }
1050
1051 static int pxa25x_ep_fifo_status(struct usb_ep *_ep)
1052 {
1053 struct pxa25x_ep *ep;
1054
1055 ep = container_of(_ep, struct pxa25x_ep, ep);
1056 if (!_ep) {
1057 DMSG("%s, bad ep\n", __func__);
1058 return -ENODEV;
1059 }
1060 /* pxa can't report unclaimed bytes from IN fifos */
1061 if ((ep->bEndpointAddress & USB_DIR_IN) != 0)
1062 return -EOPNOTSUPP;
1063 if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN
1064 || (udc_ep_get_UDCCS(ep) & UDCCS_BO_RFS) == 0)
1065 return 0;
1066 else
1067 return (udc_ep_get_UBCR(ep) & 0xfff) + 1;
1068 }
1069
1070 static void pxa25x_ep_fifo_flush(struct usb_ep *_ep)
1071 {
1072 struct pxa25x_ep *ep;
1073
1074 ep = container_of(_ep, struct pxa25x_ep, ep);
1075 if (!_ep || ep->ep.name == ep0name || !list_empty(&ep->queue)) {
1076 DMSG("%s, bad ep\n", __func__);
1077 return;
1078 }
1079
1080 /* toggle and halt bits stay unchanged */
1081
1082 /* for OUT, just read and discard the FIFO contents. */
1083 if ((ep->bEndpointAddress & USB_DIR_IN) == 0) {
1084 while (((udc_ep_get_UDCCS(ep)) & UDCCS_BO_RNE) != 0)
1085 (void)udc_ep_get_UDDR(ep);
1086 return;
1087 }
1088
1089 /* most IN status is the same, but ISO can't stall */
1090 udc_ep_set_UDCCS(ep, UDCCS_BI_TPC|UDCCS_BI_FTF|UDCCS_BI_TUR
1091 | (ep->bmAttributes == USB_ENDPOINT_XFER_ISOC
1092 ? 0 : UDCCS_BI_SST));
1093 }
1094
1095
1096 static const struct usb_ep_ops pxa25x_ep_ops = {
1097 .enable = pxa25x_ep_enable,
1098 .disable = pxa25x_ep_disable,
1099
1100 .alloc_request = pxa25x_ep_alloc_request,
1101 .free_request = pxa25x_ep_free_request,
1102
1103 .queue = pxa25x_ep_queue,
1104 .dequeue = pxa25x_ep_dequeue,
1105
1106 .set_halt = pxa25x_ep_set_halt,
1107 .fifo_status = pxa25x_ep_fifo_status,
1108 .fifo_flush = pxa25x_ep_fifo_flush,
1109 };
1110
1111
1112 /* ---------------------------------------------------------------------------
1113 * device-scoped parts of the api to the usb controller hardware
1114 * ---------------------------------------------------------------------------
1115 */
1116
1117 static int pxa25x_udc_get_frame(struct usb_gadget *_gadget)
1118 {
1119 struct pxa25x_udc *dev;
1120
1121 dev = container_of(_gadget, struct pxa25x_udc, gadget);
1122 return ((udc_get_reg(dev, UFNRH) & 0x07) << 8) |
1123 (udc_get_reg(dev, UFNRL) & 0xff);
1124 }
1125
1126 static int pxa25x_udc_wakeup(struct usb_gadget *_gadget)
1127 {
1128 struct pxa25x_udc *udc;
1129
1130 udc = container_of(_gadget, struct pxa25x_udc, gadget);
1131
1132 /* host may not have enabled remote wakeup */
1133 if ((udc_ep0_get_UDCCS(udc) & UDCCS0_DRWF) == 0)
1134 return -EHOSTUNREACH;
1135 udc_set_mask_UDCCR(udc, UDCCR_RSM);
1136 return 0;
1137 }
1138
1139 static void stop_activity(struct pxa25x_udc *, struct usb_gadget_driver *);
1140 static void udc_enable (struct pxa25x_udc *);
1141 static void udc_disable(struct pxa25x_udc *);
1142
1143 /* We disable the UDC -- and its 48 MHz clock -- whenever it's not
1144 * in active use.
1145 */
1146 static int pullup(struct pxa25x_udc *udc)
1147 {
1148 int is_active = udc->vbus && udc->pullup && !udc->suspended;
1149 DMSG("%s\n", is_active ? "active" : "inactive");
1150 if (is_active) {
1151 if (!udc->active) {
1152 udc->active = 1;
1153 /* Enable clock for USB device */
1154 clk_enable(udc->clk);
1155 udc_enable(udc);
1156 }
1157 } else {
1158 if (udc->active) {
1159 if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
1160 DMSG("disconnect %s\n", udc->driver
1161 ? udc->driver->driver.name
1162 : "(no driver)");
1163 stop_activity(udc, udc->driver);
1164 }
1165 udc_disable(udc);
1166 /* Disable clock for USB device */
1167 clk_disable(udc->clk);
1168 udc->active = 0;
1169 }
1170
1171 }
1172 return 0;
1173 }
1174
1175 /* VBUS reporting logically comes from a transceiver */
1176 static int pxa25x_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
1177 {
1178 struct pxa25x_udc *udc;
1179
1180 udc = container_of(_gadget, struct pxa25x_udc, gadget);
1181 udc->vbus = is_active;
1182 DMSG("vbus %s\n", is_active ? "supplied" : "inactive");
1183 pullup(udc);
1184 return 0;
1185 }
1186
1187 /* drivers may have software control over D+ pullup */
1188 static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
1189 {
1190 struct pxa25x_udc *udc;
1191
1192 udc = container_of(_gadget, struct pxa25x_udc, gadget);
1193
1194 /* not all boards support pullup control */
1195 if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
1196 return -EOPNOTSUPP;
1197
1198 udc->pullup = (is_active != 0);
1199 pullup(udc);
1200 return 0;
1201 }
1202
1203 /* boards may consume current from VBUS, up to 100-500mA based on config.
1204 * the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
1205 * violate USB specs.
1206 */
1207 static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
1208 {
1209 struct pxa25x_udc *udc;
1210
1211 udc = container_of(_gadget, struct pxa25x_udc, gadget);
1212
1213 if (!IS_ERR_OR_NULL(udc->transceiver))
1214 return usb_phy_set_power(udc->transceiver, mA);
1215 return -EOPNOTSUPP;
1216 }
1217
1218 static int pxa25x_udc_start(struct usb_gadget *g,
1219 struct usb_gadget_driver *driver);
1220 static int pxa25x_udc_stop(struct usb_gadget *g);
1221
1222 static const struct usb_gadget_ops pxa25x_udc_ops = {
1223 .get_frame = pxa25x_udc_get_frame,
1224 .wakeup = pxa25x_udc_wakeup,
1225 .vbus_session = pxa25x_udc_vbus_session,
1226 .pullup = pxa25x_udc_pullup,
1227 .vbus_draw = pxa25x_udc_vbus_draw,
1228 .udc_start = pxa25x_udc_start,
1229 .udc_stop = pxa25x_udc_stop,
1230 };
1231
1232 /*-------------------------------------------------------------------------*/
1233
1234 #ifdef CONFIG_USB_GADGET_DEBUG_FS
1235
1236 static int udc_debug_show(struct seq_file *m, void *_d)
1237 {
1238 struct pxa25x_udc *dev = m->private;
1239 unsigned long flags;
1240 int i;
1241 u32 tmp;
1242
1243 local_irq_save(flags);
1244
1245 /* basic device status */
1246 seq_printf(m, DRIVER_DESC "\n"
1247 "%s version: %s\nGadget driver: %s\nHost %s\n\n",
1248 driver_name, DRIVER_VERSION SIZE_STR "(pio)",
1249 dev->driver ? dev->driver->driver.name : "(none)",
1250 dev->gadget.speed == USB_SPEED_FULL ? "full speed" : "disconnected");
1251
1252 /* registers for device and ep0 */
1253 seq_printf(m,
1254 "uicr %02X.%02X, usir %02X.%02x, ufnr %02X.%02X\n",
1255 udc_get_reg(dev, UICR1), udc_get_reg(dev, UICR0),
1256 udc_get_reg(dev, USIR1), udc_get_reg(dev, USIR0),
1257 udc_get_reg(dev, UFNRH), udc_get_reg(dev, UFNRL));
1258
1259 tmp = udc_get_reg(dev, UDCCR);
1260 seq_printf(m,
1261 "udccr %02X =%s%s%s%s%s%s%s%s\n", tmp,
1262 (tmp & UDCCR_REM) ? " rem" : "",
1263 (tmp & UDCCR_RSTIR) ? " rstir" : "",
1264 (tmp & UDCCR_SRM) ? " srm" : "",
1265 (tmp & UDCCR_SUSIR) ? " susir" : "",
1266 (tmp & UDCCR_RESIR) ? " resir" : "",
1267 (tmp & UDCCR_RSM) ? " rsm" : "",
1268 (tmp & UDCCR_UDA) ? " uda" : "",
1269 (tmp & UDCCR_UDE) ? " ude" : "");
1270
1271 tmp = udc_ep0_get_UDCCS(dev);
1272 seq_printf(m,
1273 "udccs0 %02X =%s%s%s%s%s%s%s%s\n", tmp,
1274 (tmp & UDCCS0_SA) ? " sa" : "",
1275 (tmp & UDCCS0_RNE) ? " rne" : "",
1276 (tmp & UDCCS0_FST) ? " fst" : "",
1277 (tmp & UDCCS0_SST) ? " sst" : "",
1278 (tmp & UDCCS0_DRWF) ? " drwf" : "",
1279 (tmp & UDCCS0_FTF) ? " ftf" : "",
1280 (tmp & UDCCS0_IPR) ? " ipr" : "",
1281 (tmp & UDCCS0_OPR) ? " opr" : "");
1282
1283 if (dev->has_cfr) {
1284 tmp = udc_get_reg(dev, UDCCFR);
1285 seq_printf(m,
1286 "udccfr %02X =%s%s\n", tmp,
1287 (tmp & UDCCFR_AREN) ? " aren" : "",
1288 (tmp & UDCCFR_ACM) ? " acm" : "");
1289 }
1290
1291 if (dev->gadget.speed != USB_SPEED_FULL || !dev->driver)
1292 goto done;
1293
1294 seq_printf(m, "ep0 IN %lu/%lu, OUT %lu/%lu\nirqs %lu\n\n",
1295 dev->stats.write.bytes, dev->stats.write.ops,
1296 dev->stats.read.bytes, dev->stats.read.ops,
1297 dev->stats.irqs);
1298
1299 /* dump endpoint queues */
1300 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1301 struct pxa25x_ep *ep = &dev->ep [i];
1302 struct pxa25x_request *req;
1303
1304 if (i != 0) {
1305 const struct usb_endpoint_descriptor *desc;
1306
1307 desc = ep->ep.desc;
1308 if (!desc)
1309 continue;
1310 tmp = udc_ep_get_UDCCS(&dev->ep[i]);
1311 seq_printf(m,
1312 "%s max %d %s udccs %02x irqs %lu\n",
1313 ep->ep.name, usb_endpoint_maxp(desc),
1314 "pio", tmp, ep->pio_irqs);
1315 /* TODO translate all five groups of udccs bits! */
1316
1317 } else /* ep0 should only have one transfer queued */
1318 seq_printf(m, "ep0 max 16 pio irqs %lu\n",
1319 ep->pio_irqs);
1320
1321 if (list_empty(&ep->queue)) {
1322 seq_printf(m, "\t(nothing queued)\n");
1323 continue;
1324 }
1325 list_for_each_entry(req, &ep->queue, queue) {
1326 seq_printf(m,
1327 "\treq %p len %d/%d buf %p\n",
1328 &req->req, req->req.actual,
1329 req->req.length, req->req.buf);
1330 }
1331 }
1332
1333 done:
1334 local_irq_restore(flags);
1335 return 0;
1336 }
1337 DEFINE_SHOW_ATTRIBUTE(udc_debug);
1338
1339 #define create_debug_files(dev) \
1340 do { \
1341 debugfs_create_file(dev->gadget.name, \
1342 S_IRUGO, NULL, dev, &udc_debug_fops); \
1343 } while (0)
1344 #define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
1345
1346 #else /* !CONFIG_USB_GADGET_DEBUG_FS */
1347
1348 #define create_debug_files(dev) do {} while (0)
1349 #define remove_debug_files(dev) do {} while (0)
1350
1351 #endif /* CONFIG_USB_GADGET_DEBUG_FS */
1352
1353 /*-------------------------------------------------------------------------*/
1354
1355 /*
1356 * udc_disable - disable USB device controller
1357 */
1358 static void udc_disable(struct pxa25x_udc *dev)
1359 {
1360 /* block all irqs */
1361 udc_set_mask_UDCCR(dev, UDCCR_SRM|UDCCR_REM);
1362 udc_set_reg(dev, UICR0, 0xff);
1363 udc_set_reg(dev, UICR1, 0xff);
1364 udc_set_reg(dev, UFNRH, UFNRH_SIM);
1365
1366 /* if hardware supports it, disconnect from usb */
1367 pullup_off();
1368
1369 udc_clear_mask_UDCCR(dev, UDCCR_UDE);
1370
1371 ep0_idle (dev);
1372 dev->gadget.speed = USB_SPEED_UNKNOWN;
1373 }
1374
1375
1376 /*
1377 * udc_reinit - initialize software state
1378 */
1379 static void udc_reinit(struct pxa25x_udc *dev)
1380 {
1381 u32 i;
1382
1383 /* device/ep0 records init */
1384 INIT_LIST_HEAD (&dev->gadget.ep_list);
1385 INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);
1386 dev->ep0state = EP0_IDLE;
1387 dev->gadget.quirk_altset_not_supp = 1;
1388
1389 /* basic endpoint records init */
1390 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1391 struct pxa25x_ep *ep = &dev->ep[i];
1392
1393 if (i != 0)
1394 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
1395
1396 ep->ep.desc = NULL;
1397 ep->stopped = 0;
1398 INIT_LIST_HEAD (&ep->queue);
1399 ep->pio_irqs = 0;
1400 usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
1401 }
1402
1403 /* the rest was statically initialized, and is read-only */
1404 }
1405
1406 /* until it's enabled, this UDC should be completely invisible
1407 * to any USB host.
1408 */
1409 static void udc_enable (struct pxa25x_udc *dev)
1410 {
1411 udc_clear_mask_UDCCR(dev, UDCCR_UDE);
1412
1413 /* try to clear these bits before we enable the udc */
1414 udc_ack_int_UDCCR(dev, UDCCR_SUSIR|/*UDCCR_RSTIR|*/UDCCR_RESIR);
1415
1416 ep0_idle(dev);
1417 dev->gadget.speed = USB_SPEED_UNKNOWN;
1418 dev->stats.irqs = 0;
1419
1420 /*
1421 * sequence taken from chapter 12.5.10, PXA250 AppProcDevManual:
1422 * - enable UDC
1423 * - if RESET is already in progress, ack interrupt
1424 * - unmask reset interrupt
1425 */
1426 udc_set_mask_UDCCR(dev, UDCCR_UDE);
1427 if (!(udc_get_reg(dev, UDCCR) & UDCCR_UDA))
1428 udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
1429
1430 if (dev->has_cfr /* UDC_RES2 is defined */) {
1431 /* pxa255 (a0+) can avoid a set_config race that could
1432 * prevent gadget drivers from configuring correctly
1433 */
1434 udc_set_reg(dev, UDCCFR, UDCCFR_ACM | UDCCFR_MB1);
1435 } else {
1436 /* "USB test mode" for pxa250 errata 40-42 (stepping a0, a1)
1437 * which could result in missing packets and interrupts.
1438 * supposedly one bit per endpoint, controlling whether it
1439 * double buffers or not; ACM/AREN bits fit into the holes.
1440 * zero bits (like USIR0_IRx) disable double buffering.
1441 */
1442 udc_set_reg(dev, UDC_RES1, 0x00);
1443 udc_set_reg(dev, UDC_RES2, 0x00);
1444 }
1445
1446 /* enable suspend/resume and reset irqs */
1447 udc_clear_mask_UDCCR(dev, UDCCR_SRM | UDCCR_REM);
1448
1449 /* enable ep0 irqs */
1450 udc_set_reg(dev, UICR0, udc_get_reg(dev, UICR0) & ~UICR0_IM0);
1451
1452 /* if hardware supports it, pullup D+ and wait for reset */
1453 pullup_on();
1454 }
1455
1456
1457 /* when a driver is successfully registered, it will receive
1458 * control requests including set_configuration(), which enables
1459 * non-control requests. then usb traffic follows until a
1460 * disconnect is reported. then a host may connect again, or
1461 * the driver might get unbound.
1462 */
1463 static int pxa25x_udc_start(struct usb_gadget *g,
1464 struct usb_gadget_driver *driver)
1465 {
1466 struct pxa25x_udc *dev = to_pxa25x(g);
1467 int retval;
1468
1469 /* first hook up the driver ... */
1470 dev->driver = driver;
1471 dev->pullup = 1;
1472
1473 /* ... then enable host detection and ep0; and we're ready
1474 * for set_configuration as well as eventual disconnect.
1475 */
1476 /* connect to bus through transceiver */
1477 if (!IS_ERR_OR_NULL(dev->transceiver)) {
1478 retval = otg_set_peripheral(dev->transceiver->otg,
1479 &dev->gadget);
1480 if (retval)
1481 goto bind_fail;
1482 }
1483
1484 dump_state(dev);
1485 return 0;
1486 bind_fail:
1487 return retval;
1488 }
1489
1490 static void
1491 reset_gadget(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
1492 {
1493 int i;
1494
1495 /* don't disconnect drivers more than once */
1496 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1497 driver = NULL;
1498 dev->gadget.speed = USB_SPEED_UNKNOWN;
1499
1500 /* prevent new request submissions, kill any outstanding requests */
1501 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1502 struct pxa25x_ep *ep = &dev->ep[i];
1503
1504 ep->stopped = 1;
1505 nuke(ep, -ESHUTDOWN);
1506 }
1507 del_timer_sync(&dev->timer);
1508
1509 /* report reset; the driver is already quiesced */
1510 if (driver)
1511 usb_gadget_udc_reset(&dev->gadget, driver);
1512
1513 /* re-init driver-visible data structures */
1514 udc_reinit(dev);
1515 }
1516
1517 static void
1518 stop_activity(struct pxa25x_udc *dev, struct usb_gadget_driver *driver)
1519 {
1520 int i;
1521
1522 /* don't disconnect drivers more than once */
1523 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1524 driver = NULL;
1525 dev->gadget.speed = USB_SPEED_UNKNOWN;
1526
1527 /* prevent new request submissions, kill any outstanding requests */
1528 for (i = 0; i < PXA_UDC_NUM_ENDPOINTS; i++) {
1529 struct pxa25x_ep *ep = &dev->ep[i];
1530
1531 ep->stopped = 1;
1532 nuke(ep, -ESHUTDOWN);
1533 }
1534 del_timer_sync(&dev->timer);
1535
1536 /* report disconnect; the driver is already quiesced */
1537 if (driver)
1538 driver->disconnect(&dev->gadget);
1539
1540 /* re-init driver-visible data structures */
1541 udc_reinit(dev);
1542 }
1543
1544 static int pxa25x_udc_stop(struct usb_gadget *g)
1545 {
1546 struct pxa25x_udc *dev = to_pxa25x(g);
1547
1548 local_irq_disable();
1549 dev->pullup = 0;
1550 stop_activity(dev, NULL);
1551 local_irq_enable();
1552
1553 if (!IS_ERR_OR_NULL(dev->transceiver))
1554 (void) otg_set_peripheral(dev->transceiver->otg, NULL);
1555
1556 dev->driver = NULL;
1557
1558 dump_state(dev);
1559
1560 return 0;
1561 }
1562
1563 /*-------------------------------------------------------------------------*/
1564
1565 #ifdef CONFIG_ARCH_LUBBOCK
1566
1567 /* Lubbock has separate connect and disconnect irqs. More typical designs
1568 * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
1569 */
1570
1571 static irqreturn_t
1572 lubbock_vbus_irq(int irq, void *_dev)
1573 {
1574 struct pxa25x_udc *dev = _dev;
1575 int vbus;
1576
1577 dev->stats.irqs++;
1578 switch (irq) {
1579 case LUBBOCK_USB_IRQ:
1580 vbus = 1;
1581 disable_irq(LUBBOCK_USB_IRQ);
1582 enable_irq(LUBBOCK_USB_DISC_IRQ);
1583 break;
1584 case LUBBOCK_USB_DISC_IRQ:
1585 vbus = 0;
1586 disable_irq(LUBBOCK_USB_DISC_IRQ);
1587 enable_irq(LUBBOCK_USB_IRQ);
1588 break;
1589 default:
1590 return IRQ_NONE;
1591 }
1592
1593 pxa25x_udc_vbus_session(&dev->gadget, vbus);
1594 return IRQ_HANDLED;
1595 }
1596
1597 #endif
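/* Sketch of the "more typical" single VBUS-sense GPIO design mentioned
 * above; gpio_vbus is an assumed board-specific GPIO number, and both
 * edges are routed to one handler that simply reports the level:
 *
 *	static irqreturn_t gpio_vbus_irq(int irq, void *_dev)
 *	{
 *		struct pxa25x_udc *dev = _dev;
 *
 *		dev->stats.irqs++;
 *		pxa25x_udc_vbus_session(&dev->gadget,
 *					gpio_get_value(gpio_vbus));
 *		return IRQ_HANDLED;
 *	}
 *
 *	request_irq(gpio_to_irq(gpio_vbus), gpio_vbus_irq,
 *		    IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 *		    driver_name, dev);
 */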
1598
1599
1600 /*-------------------------------------------------------------------------*/
1601
1602 static inline void clear_ep_state (struct pxa25x_udc *dev)
1603 {
1604 unsigned i;
1605
1606 /* hardware SET_{CONFIGURATION,INTERFACE} automagic resets endpoint
1607 * fifos, and pending transactions mustn't be continued in any case.
1608 */
1609 for (i = 1; i < PXA_UDC_NUM_ENDPOINTS; i++)
1610 nuke(&dev->ep[i], -ECONNABORTED);
1611 }
1612
1613 static void udc_watchdog(struct timer_list *t)
1614 {
1615 struct pxa25x_udc *dev = from_timer(dev, t, timer);
1616
1617 local_irq_disable();
1618 if (dev->ep0state == EP0_STALL
1619 && (udc_ep0_get_UDCCS(dev) & UDCCS0_FST) == 0
1620 && (udc_ep0_get_UDCCS(dev) & UDCCS0_SST) == 0) {
1621 udc_ep0_set_UDCCS(dev, UDCCS0_FST|UDCCS0_FTF);
1622 DBG(DBG_VERBOSE, "ep0 re-stall\n");
1623 start_watchdog(dev);
1624 }
1625 local_irq_enable();
1626 }
1627
1628 static void handle_ep0 (struct pxa25x_udc *dev)
1629 {
1630 u32 udccs0 = udc_ep0_get_UDCCS(dev);
1631 struct pxa25x_ep *ep = &dev->ep [0];
1632 struct pxa25x_request *req;
1633 union {
1634 struct usb_ctrlrequest r;
1635 u8 raw [8];
1636 u32 word [2];
1637 } u;
1638
1639 if (list_empty(&ep->queue))
1640 req = NULL;
1641 else
1642 req = list_entry(ep->queue.next, struct pxa25x_request, queue);
1643
1644 /* clear stall status */
1645 if (udccs0 & UDCCS0_SST) {
1646 nuke(ep, -EPIPE);
1647 udc_ep0_set_UDCCS(dev, UDCCS0_SST);
1648 del_timer(&dev->timer);
1649 ep0_idle(dev);
1650 }
1651
1652 /* previous request unfinished? non-error iff back-to-back ... */
1653 if ((udccs0 & UDCCS0_SA) != 0 && dev->ep0state != EP0_IDLE) {
1654 nuke(ep, 0);
1655 del_timer(&dev->timer);
1656 ep0_idle(dev);
1657 }
1658
1659 switch (dev->ep0state) {
1660 case EP0_IDLE:
1661 /* late-breaking status? */
1662 udccs0 = udc_ep0_get_UDCCS(dev);
1663
1664 /* start control request? */
1665 if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))
1666 == (UDCCS0_OPR|UDCCS0_SA|UDCCS0_RNE))) {
1667 int i;
1668
1669 nuke (ep, -EPROTO);
1670
1671 /* read SETUP packet */
1672 for (i = 0; i < 8; i++) {
1673 if (unlikely(!(udc_ep0_get_UDCCS(dev) & UDCCS0_RNE))) {
1674 bad_setup:
1675 DMSG("SETUP %d!\n", i);
1676 goto stall;
1677 }
1678 u.raw [i] = (u8) udc_get_reg(dev, UDDR0);
1679 }
1680 if (unlikely((udc_ep0_get_UDCCS(dev) & UDCCS0_RNE) != 0))
1681 goto bad_setup;
1682
1683 got_setup:
1684 DBG(DBG_VERBOSE, "SETUP %02x.%02x v%04x i%04x l%04x\n",
1685 u.r.bRequestType, u.r.bRequest,
1686 le16_to_cpu(u.r.wValue),
1687 le16_to_cpu(u.r.wIndex),
1688 le16_to_cpu(u.r.wLength));
1689
1690 /* cope with automagic for some standard requests. */
1691 dev->req_std = (u.r.bRequestType & USB_TYPE_MASK)
1692 == USB_TYPE_STANDARD;
1693 dev->req_config = 0;
1694 dev->req_pending = 1;
1695 switch (u.r.bRequest) {
1696 /* hardware restricts gadget drivers here! */
1697 case USB_REQ_SET_CONFIGURATION:
1698 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1699 /* reflect hardware's automagic
1700 * up to the gadget driver.
1701 */
1702 config_change:
1703 dev->req_config = 1;
1704 clear_ep_state(dev);
1705 /* if !has_cfr, there's no synch
1706 * else use AREN (later) not SA|OPR
1707 * USIR0_IR0 acts edge sensitive
1708 */
1709 }
1710 break;
1711 /* ... and here, even more ... */
1712 case USB_REQ_SET_INTERFACE:
1713 if (u.r.bRequestType == USB_RECIP_INTERFACE) {
1714 /* udc hardware is broken by design:
1715 * - altsetting may only be zero;
1716 * - hw resets all interfaces' eps;
1717 * - ep reset doesn't include halt(?).
1718 */
1719 DMSG("broken set_interface (%d/%d)\n",
1720 le16_to_cpu(u.r.wIndex),
1721 le16_to_cpu(u.r.wValue));
1722 goto config_change;
1723 }
1724 break;
1725 /* hardware was supposed to hide this */
1726 case USB_REQ_SET_ADDRESS:
1727 if (u.r.bRequestType == USB_RECIP_DEVICE) {
1728 ep0start(dev, 0, "address");
1729 return;
1730 }
1731 break;
1732 }
1733
1734 if (u.r.bRequestType & USB_DIR_IN)
1735 dev->ep0state = EP0_IN_DATA_PHASE;
1736 else
1737 dev->ep0state = EP0_OUT_DATA_PHASE;
1738
1739 i = dev->driver->setup(&dev->gadget, &u.r);
1740 if (i < 0) {
1741 /* hardware automagic preventing STALL... */
1742 if (dev->req_config) {
					/* hardware sometimes neglects to
					 * tell us about config change events,
					 * so later ones may fail...
					 */
					WARNING("config change %02x fail %d?\n",
						u.r.bRequest, i);
					return;
					/* TODO experiment: if has_cfr,
					 * hardware didn't ACK; maybe we
					 * could actually STALL!
					 */
				}
				DBG(DBG_VERBOSE, "protocol STALL, "
					"%02x err %d\n", udc_ep0_get_UDCCS(dev), i);
stall:
				/* the watchdog timer helps deal with cases
				 * where udc seems to clear FST wrongly, and
				 * then NAKs instead of STALLing.
				 */
				ep0start(dev, UDCCS0_FST|UDCCS0_FTF, "stall");
				start_watchdog(dev);
				dev->ep0state = EP0_STALL;

			/* deferred i/o == no response yet */
			} else if (dev->req_pending) {
				if (likely(dev->ep0state == EP0_IN_DATA_PHASE
						|| dev->req_std || u.r.wLength))
					ep0start(dev, 0, "defer");
				else
					ep0start(dev, UDCCS0_IPR, "defer/IPR");
			}

			/* expect at least one data or status stage irq */
			return;

		} else if (likely((udccs0 & (UDCCS0_OPR|UDCCS0_SA))
				== (UDCCS0_OPR|UDCCS0_SA))) {
			unsigned i;

			/* pxa210/250 erratum 131 for B0/B1 says RNE lies.
			 * still observed on a pxa255 a0.
			 */
			DBG(DBG_VERBOSE, "e131\n");
			nuke(ep, -EPROTO);

			/* read SETUP data, but don't trust it too much */
			for (i = 0; i < 8; i++)
				u.raw [i] = (u8) UDDR0;
			if ((u.r.bRequestType & USB_RECIP_MASK)
					> USB_RECIP_OTHER)
				goto stall;
			if (u.word [0] == 0 && u.word [1] == 0)
				goto stall;
			goto got_setup;
		} else {
			/* some random early IRQ:
			 * - we acked FST
			 * - IPR cleared
			 * - OPR got set, without SA (likely status stage)
			 */
			udc_ep0_set_UDCCS(dev, udccs0 & (UDCCS0_SA|UDCCS0_OPR));
		}
		break;
	case EP0_IN_DATA_PHASE:			/* GET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			udc_ep0_set_UDCCS(dev, UDCCS0_OPR|UDCCS0_FTF);
			DBG(DBG_VERBOSE, "ep0in premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		} else /* irq was IPR clearing */ {
			if (req) {
				/* this IN packet might finish the request */
				(void) write_ep0_fifo(ep, req);
			} /* else IN token before response was written */
		}
		break;
	case EP0_OUT_DATA_PHASE:		/* SET_DESCRIPTOR etc */
		if (udccs0 & UDCCS0_OPR) {
			if (req) {
				/* this OUT packet might finish the request */
				if (read_ep0_fifo(ep, req))
					done(ep, req, 0);
				/* else more OUT packets expected */
			} /* else OUT token before read was issued */
		} else /* irq was IPR clearing */ {
			DBG(DBG_VERBOSE, "ep0out premature status\n");
			if (req)
				done(ep, req, 0);
			ep0_idle(dev);
		}
		break;
	case EP0_END_XFER:
		if (req)
			done(ep, req, 0);
		/* ack control-IN status (maybe in-zlp was skipped)
		 * also appears after some config change events.
		 */
		if (udccs0 & UDCCS0_OPR)
			udc_ep0_set_UDCCS(dev, UDCCS0_OPR);
		ep0_idle(dev);
		break;
	case EP0_STALL:
		udc_ep0_set_UDCCS(dev, UDCCS0_FST);
		break;
	}
	udc_set_reg(dev, USIR0, USIR0_IR0);
}

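/*
 * handle_ep - non-control endpoint interrupt service (PIO only)
 *
 * Acks the per-endpoint status bits and moves data between the endpoint
 * FIFO and the request at the head of the queue, looping for as long as
 * requests keep completing.
 */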
static void handle_ep(struct pxa25x_ep *ep)
{
	struct pxa25x_request	*req;
	int			is_in = ep->bEndpointAddress & USB_DIR_IN;
	int			completed;
	u32			udccs, tmp;

	do {
		completed = 0;
		if (likely (!list_empty(&ep->queue)))
			req = list_entry(ep->queue.next,
					struct pxa25x_request, queue);
		else
			req = NULL;

		// TODO check FST handling

		udccs = udc_ep_get_UDCCS(ep);
		if (unlikely(is_in)) {	/* irq from TPC, SST, or (ISO) TUR */
			tmp = UDCCS_BI_TUR;
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp |= UDCCS_BI_SST;
			tmp &= udccs;
			if (likely (tmp))
				udc_ep_set_UDCCS(ep, tmp);
			if (req && likely ((udccs & UDCCS_BI_TFS) != 0))
				completed = write_fifo(ep, req);

		} else {	/* irq from RPC (or for ISO, ROF) */
			if (likely(ep->bmAttributes == USB_ENDPOINT_XFER_BULK))
				tmp = UDCCS_BO_SST | UDCCS_BO_DME;
			else
				tmp = UDCCS_IO_ROF | UDCCS_IO_DME;
			tmp &= udccs;
			if (likely(tmp))
				udc_ep_set_UDCCS(ep, tmp);

			/* fifos can hold packets, ready for reading... */
			if (likely(req)) {
				completed = read_fifo(ep, req);
			} else
				pio_irq_disable(ep);
		}
		ep->pio_irqs++;
	} while (completed);
}

/*
 *	pxa25x_udc_irq - interrupt handler
 *
 * avoid delays in ep0 processing. the control handshaking isn't always
 * under software control (pxa250c0 and the pxa255 are better), and delays
 * could cause usb protocol errors.
 */
static irqreturn_t
pxa25x_udc_irq(int irq, void *_dev)
{
	struct pxa25x_udc	*dev = _dev;
	int			handled;

	dev->stats.irqs++;
	do {
		u32		udccr = udc_get_reg(dev, UDCCR);

		handled = 0;

		/* SUSpend Interrupt Request */
		if (unlikely(udccr & UDCCR_SUSIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_SUSIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB suspend\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			ep0_idle (dev);
		}

		/* RESume Interrupt Request */
		if (unlikely(udccr & UDCCR_RESIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_RESIR);
			handled = 1;
			DBG(DBG_VERBOSE, "USB resume\n");

			if (dev->gadget.speed != USB_SPEED_UNKNOWN
					&& dev->driver
					&& dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}

		/* ReSeT Interrupt Request - USB reset */
		if (unlikely(udccr & UDCCR_RSTIR)) {
			udc_ack_int_UDCCR(dev, UDCCR_RSTIR);
			handled = 1;

			if ((udc_get_reg(dev, UDCCR) & UDCCR_UDA) == 0) {
				DBG(DBG_VERBOSE, "USB reset start\n");

				/* reset driver and endpoints,
				 * in case that's not yet done
				 */
				reset_gadget(dev, dev->driver);

			} else {
				DBG(DBG_VERBOSE, "USB reset end\n");
				dev->gadget.speed = USB_SPEED_FULL;
				memset(&dev->stats, 0, sizeof dev->stats);
				/* driver and endpoints are still reset */
			}

		} else {
			u32	usir0 = udc_get_reg(dev, USIR0) &
					~udc_get_reg(dev, UICR0);
			u32	usir1 = udc_get_reg(dev, USIR1) &
					~udc_get_reg(dev, UICR1);
			int	i;

			if (unlikely (!usir0 && !usir1))
				continue;

			DBG(DBG_VERY_NOISY, "irq %02x.%02x\n", usir1, usir0);

			/* control traffic */
			if (usir0 & USIR0_IR0) {
				dev->ep[0].pio_irqs++;
				handle_ep0(dev);
				handled = 1;
			}

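			/* USIR0 bit i (i = 1..7) flags endpoint i; USIR1
			 * bit i flags endpoint i+8, which only exists when
			 * CONFIG_USB_PXA25X_SMALL is not set.
			 */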
			/* endpoint data transfers */
			for (i = 0; i < 8; i++) {
				u32	tmp = 1 << i;

				if (i && (usir0 & tmp)) {
					handle_ep(&dev->ep[i]);
					udc_set_reg(dev, USIR0,
						udc_get_reg(dev, USIR0) | tmp);
					handled = 1;
				}
#ifndef	CONFIG_USB_PXA25X_SMALL
				if (usir1 & tmp) {
					handle_ep(&dev->ep[i+8]);
					udc_set_reg(dev, USIR1,
						udc_get_reg(dev, USIR1) | tmp);
					handled = 1;
				}
#endif
			}
		}

		/* we could also ask for 1 msec SOF (SIR) interrupts */

	} while (handled);
	return IRQ_HANDLED;
}

/*-------------------------------------------------------------------------*/

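/*
 * The gadget's struct device lives inside the statically allocated
 * "memory" object below, so there is nothing to free on release; an
 * empty release() is still provided so the driver core doesn't complain
 * about a device without one.
 */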
static void nop_release (struct device *dev)
{
	DMSG("%s %s\n", __func__, dev_name(dev));
}

/* this uses load-time allocation and initialization (instead of
 * doing it at run-time) to save code, eliminate fault paths, and
 * be more obviously correct.
 */
static struct pxa25x_udc memory = {
	.gadget = {
		.ops		= &pxa25x_udc_ops,
		.ep0		= &memory.ep[0].ep,
		.name		= driver_name,
		.dev = {
			.init_name	= "gadget",
			.release	= nop_release,
		},
	},

	/* control endpoint */
	.ep[0] = {
		.ep = {
			.name		= ep0name,
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= EP0_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
						USB_EP_CAPS_DIR_ALL),
		},
		.dev		= &memory,
		.regoff_udccs	= UDCCS0,
		.regoff_uddr	= UDDR0,
	},

	/* first group of endpoints */
	.ep[1] = {
		.ep = {
			.name		= "ep1in-bulk",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.regoff_udccs	= UDCCS1,
		.regoff_uddr	= UDDR1,
	},
	.ep[2] = {
		.ep = {
			.name		= "ep2out-bulk",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
						USB_EP_CAPS_DIR_OUT),
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = 2,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.regoff_udccs	= UDCCS2,
		.regoff_ubcr	= UBCR2,
		.regoff_uddr	= UDDR2,
	},
#ifndef	CONFIG_USB_PXA25X_SMALL
	.ep[3] = {
		.ep = {
			.name		= "ep3in-iso",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 3,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs	= UDCCS3,
		.regoff_uddr	= UDDR3,
	},
	.ep[4] = {
		.ep = {
			.name		= "ep4out-iso",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
						USB_EP_CAPS_DIR_OUT),
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = 4,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs	= UDCCS4,
		.regoff_ubcr	= UBCR4,
		.regoff_uddr	= UDDR4,
	},
	.ep[5] = {
		.ep = {
			.name		= "ep5in-int",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= INT_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 5,
		.bmAttributes	= USB_ENDPOINT_XFER_INT,
		.regoff_udccs	= UDCCS5,
		.regoff_uddr	= UDDR5,
	},

	/* second group of endpoints */
	.ep[6] = {
		.ep = {
			.name		= "ep6in-bulk",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 6,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.regoff_udccs	= UDCCS6,
		.regoff_uddr	= UDDR6,
	},
	.ep[7] = {
		.ep = {
			.name		= "ep7out-bulk",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
						USB_EP_CAPS_DIR_OUT),
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = 7,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.regoff_udccs	= UDCCS7,
		.regoff_ubcr	= UBCR7,
		.regoff_uddr	= UDDR7,
	},
	.ep[8] = {
		.ep = {
			.name		= "ep8in-iso",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 8,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs	= UDCCS8,
		.regoff_uddr	= UDDR8,
	},
	.ep[9] = {
		.ep = {
			.name		= "ep9out-iso",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
						USB_EP_CAPS_DIR_OUT),
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = 9,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs	= UDCCS9,
		.regoff_ubcr	= UBCR9,
		.regoff_uddr	= UDDR9,
	},
	.ep[10] = {
		.ep = {
			.name		= "ep10in-int",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= INT_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 10,
		.bmAttributes	= USB_ENDPOINT_XFER_INT,
		.regoff_udccs	= UDCCS10,
		.regoff_uddr	= UDDR10,
	},

	/* third group of endpoints */
	.ep[11] = {
		.ep = {
			.name		= "ep11in-bulk",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 11,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.regoff_udccs	= UDCCS11,
		.regoff_uddr	= UDDR11,
	},
	.ep[12] = {
		.ep = {
			.name		= "ep12out-bulk",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= BULK_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
						USB_EP_CAPS_DIR_OUT),
		},
		.dev		= &memory,
		.fifo_size	= BULK_FIFO_SIZE,
		.bEndpointAddress = 12,
		.bmAttributes	= USB_ENDPOINT_XFER_BULK,
		.regoff_udccs	= UDCCS12,
		.regoff_ubcr	= UBCR12,
		.regoff_uddr	= UDDR12,
	},
	.ep[13] = {
		.ep = {
			.name		= "ep13in-iso",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 13,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs	= UDCCS13,
		.regoff_uddr	= UDDR13,
	},
	.ep[14] = {
		.ep = {
			.name		= "ep14out-iso",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= ISO_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
						USB_EP_CAPS_DIR_OUT),
		},
		.dev		= &memory,
		.fifo_size	= ISO_FIFO_SIZE,
		.bEndpointAddress = 14,
		.bmAttributes	= USB_ENDPOINT_XFER_ISOC,
		.regoff_udccs	= UDCCS14,
		.regoff_ubcr	= UBCR14,
		.regoff_uddr	= UDDR14,
	},
	.ep[15] = {
		.ep = {
			.name		= "ep15in-int",
			.ops		= &pxa25x_ep_ops,
			.maxpacket	= INT_FIFO_SIZE,
			.caps		= USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
						USB_EP_CAPS_DIR_IN),
		},
		.dev		= &memory,
		.fifo_size	= INT_FIFO_SIZE,
		.bEndpointAddress = USB_DIR_IN | 15,
		.bmAttributes	= USB_ENDPOINT_XFER_INT,
		.regoff_udccs	= UDCCS15,
		.regoff_uddr	= UDDR15,
	},
#endif /* !CONFIG_USB_PXA25X_SMALL */
};

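/* Silicon identification uses the ARM main ID register (CP15 register 0,
 * read with MRC in the probe routine below): the vendor field must match
 * Intel XScale, and the product/revision fields select the specific PXA
 * or IXP4xx stepping whose UDC quirks matter here.
 */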
#define CP15R0_VENDOR_MASK	0xffffe000

#if	defined(CONFIG_ARCH_PXA)
#define CP15R0_XSCALE_VALUE	0x69052000	/* intel/arm/xscale */

#elif	defined(CONFIG_ARCH_IXP4XX)
#define CP15R0_XSCALE_VALUE	0x69054000	/* intel/arm/ixp4xx */

#endif

#define CP15R0_PROD_MASK	0x000003f0
#define PXA25x			0x00000100	/* and PXA26x */
#define PXA210			0x00000120

#define CP15R0_REV_MASK		0x0000000f

#define CP15R0_PRODREV_MASK	(CP15R0_PROD_MASK | CP15R0_REV_MASK)

#define PXA255_A0		0x00000106	/* or PXA260_B1 */
#define PXA250_C0		0x00000105	/* or PXA26x_B0 */
#define PXA250_B2		0x00000104
#define PXA250_B1		0x00000103	/* or PXA260_A0 */
#define PXA250_B0		0x00000102
#define PXA250_A1		0x00000101
#define PXA250_A0		0x00000100

#define PXA210_C0		0x00000125
#define PXA210_B2		0x00000124
#define PXA210_B1		0x00000123
#define PXA210_B0		0x00000122
#define IXP425_A0		0x000001c1
#define IXP425_B0		0x000001f1
#define IXP465_AD		0x00000200

/*
 *	probe - binds to the platform device
 */
static int pxa25x_udc_probe(struct platform_device *pdev)
{
	struct pxa25x_udc *dev = &memory;
	int retval, irq;
	u32 chiprev;

	pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);

	/* insist on Intel/ARM/XScale */
	asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
	if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
		pr_err("%s: not XScale!\n", driver_name);
		return -ENODEV;
	}

	/* trigger chiprev-specific logic */
	switch (chiprev & CP15R0_PRODREV_MASK) {
#if	defined(CONFIG_ARCH_PXA)
	case PXA255_A0:
		dev->has_cfr = 1;
		break;
	case PXA250_A0:
	case PXA250_A1:
		/* A0/A1 "not released"; ep 13, 15 unusable */
		fallthrough;
	case PXA250_B2: case PXA210_B2:
	case PXA250_B1: case PXA210_B1:
	case PXA250_B0: case PXA210_B0:
		/* OUT-DMA is broken ... */
		fallthrough;
	case PXA250_C0: case PXA210_C0:
		break;
#elif	defined(CONFIG_ARCH_IXP4XX)
	case IXP425_A0:
	case IXP425_B0:
	case IXP465_AD:
		dev->has_cfr = 1;
		break;
#endif
	default:
		pr_err("%s: unrecognized processor: %08x\n",
			driver_name, chiprev);
		/* iop3xx, ixp4xx, ... */
		return -ENODEV;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	dev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs))
		return PTR_ERR(dev->regs);

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk))
		return PTR_ERR(dev->clk);

	pr_debug("%s: IRQ %d%s%s\n", driver_name, irq,
		dev->has_cfr ? "" : " (!cfr)",
		SIZE_STR "(pio)"
		);

	/* other non-static parts of init */
	dev->dev = &pdev->dev;
	dev->mach = dev_get_platdata(&pdev->dev);

	dev->transceiver = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);

	if (gpio_is_valid(dev->mach->gpio_pullup)) {
		retval = devm_gpio_request(&pdev->dev, dev->mach->gpio_pullup,
					   "pxa25x_udc GPIO PULLUP");
		if (retval) {
			dev_dbg(&pdev->dev,
				"can't get pullup gpio %d, err: %d\n",
				dev->mach->gpio_pullup, retval);
			goto err;
		}
		gpio_direction_output(dev->mach->gpio_pullup, 0);
	}

	timer_setup(&dev->timer, udc_watchdog, 0);

	the_controller = dev;
	platform_set_drvdata(pdev, dev);

	udc_disable(dev);
	udc_reinit(dev);

	dev->vbus = 0;

	/* irq setup after old hardware state is cleaned up */
	retval = devm_request_irq(&pdev->dev, irq, pxa25x_udc_irq, 0,
				  driver_name, dev);
	if (retval != 0) {
		pr_err("%s: can't get irq %d, err %d\n",
			driver_name, irq, retval);
		goto err;
	}
	dev->got_irq = 1;

#ifdef CONFIG_ARCH_LUBBOCK
	if (machine_is_lubbock()) {
		retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_DISC_IRQ,
					  lubbock_vbus_irq, 0, driver_name,
					  dev);
		if (retval != 0) {
			pr_err("%s: can't get irq %i, err %d\n",
				driver_name, LUBBOCK_USB_DISC_IRQ, retval);
			goto err;
		}
		retval = devm_request_irq(&pdev->dev, LUBBOCK_USB_IRQ,
					  lubbock_vbus_irq, 0, driver_name,
					  dev);
		if (retval != 0) {
			pr_err("%s: can't get irq %i, err %d\n",
				driver_name, LUBBOCK_USB_IRQ, retval);
			goto err;
		}
	} else
#endif
	create_debug_files(dev);

	retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
	if (!retval)
		return retval;

	remove_debug_files(dev);
 err:
	if (!IS_ERR_OR_NULL(dev->transceiver))
		dev->transceiver = NULL;
	return retval;
}

static void pxa25x_udc_shutdown(struct platform_device *_dev)
{
	pullup_off();
}

static int pxa25x_udc_remove(struct platform_device *pdev)
{
	struct pxa25x_udc *dev = platform_get_drvdata(pdev);

	if (dev->driver)
		return -EBUSY;

	usb_del_gadget_udc(&dev->gadget);
	dev->pullup = 0;
	pullup(dev);

	remove_debug_files(dev);

	if (!IS_ERR_OR_NULL(dev->transceiver))
		dev->transceiver = NULL;

	the_controller = NULL;
	return 0;
}

/*-------------------------------------------------------------------------*/

#ifdef	CONFIG_PM

/* USB suspend (controlled by the host) and system suspend (controlled
 * by the PXA) don't necessarily work well together.  If USB is active,
 * the 48 MHz clock is required; so the system can't enter 33 MHz idle
 * mode, or any deeper PM saving state.
 *
 * For now, we punt and forcibly disconnect from the USB host when PXA
 * enters any suspend state.  While we're disconnected, we always disable
 * the 48MHz USB clock ... allowing PXA sleep and/or 33 MHz idle states.
 * Boards without software pullup control shouldn't use those states.
 * VBUS IRQs should probably be ignored so that the PXA device just acts
 * "dead" to USB hosts until system resume.
 */
static int pxa25x_udc_suspend(struct platform_device *dev, pm_message_t state)
{
	struct pxa25x_udc	*udc = platform_get_drvdata(dev);
	unsigned long		flags;

	if (!gpio_is_valid(udc->mach->gpio_pullup) && !udc->mach->udc_command)
		WARNING("USB host won't detect disconnect!\n");
	udc->suspended = 1;

	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);

	return 0;
}

static int pxa25x_udc_resume(struct platform_device *dev)
{
	struct pxa25x_udc	*udc = platform_get_drvdata(dev);
	unsigned long		flags;

	udc->suspended = 0;
	local_irq_save(flags);
	pullup(udc);
	local_irq_restore(flags);

	return 0;
}

#else
#define	pxa25x_udc_suspend	NULL
#define	pxa25x_udc_resume	NULL
#endif

/*-------------------------------------------------------------------------*/

static struct platform_driver udc_driver = {
	.shutdown	= pxa25x_udc_shutdown,
	.probe		= pxa25x_udc_probe,
	.remove		= pxa25x_udc_remove,
	.suspend	= pxa25x_udc_suspend,
	.resume		= pxa25x_udc_resume,
	.driver		= {
		.name	= "pxa25x-udc",
	},
};

module_platform_driver(udc_driver);

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Frank Becker, Robert Schwebel, David Brownell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa25x-udc");