1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
4 */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/kernel.h>
7 #include <linux/module.h>
8 #include <linux/pci.h>
9 #include <linux/delay.h>
10 #include <linux/dmi.h>
11 #include <linux/errno.h>
12 #include <linux/gpio/consumer.h>
13 #include <linux/gpio/machine.h>
14 #include <linux/list.h>
15 #include <linux/interrupt.h>
16 #include <linux/usb/ch9.h>
17 #include <linux/usb/gadget.h>
18 #include <linux/irq.h>
19
20 #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
21 #define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */
22
23 /* Address offset of Registers */
24 #define UDC_EP_REG_SHIFT 0x20 /* Offset to next EP */
25
26 #define UDC_EPCTL_ADDR 0x00 /* Endpoint control */
27 #define UDC_EPSTS_ADDR 0x04 /* Endpoint status */
28 #define UDC_BUFIN_FRAMENUM_ADDR 0x08 /* buffer size in / frame number out */
29 #define UDC_BUFOUT_MAXPKT_ADDR 0x0C /* buffer size out / maxpkt in */
30 #define UDC_SUBPTR_ADDR 0x10 /* setup buffer pointer */
31 #define UDC_DESPTR_ADDR 0x14 /* Data descriptor pointer */
32 #define UDC_CONFIRM_ADDR 0x18 /* Write/Read confirmation */
33
34 #define UDC_DEVCFG_ADDR 0x400 /* Device configuration */
35 #define UDC_DEVCTL_ADDR 0x404 /* Device control */
36 #define UDC_DEVSTS_ADDR 0x408 /* Device status */
37 #define UDC_DEVIRQSTS_ADDR 0x40C /* Device irq status */
38 #define UDC_DEVIRQMSK_ADDR 0x410 /* Device irq mask */
39 #define UDC_EPIRQSTS_ADDR 0x414 /* Endpoint irq status */
40 #define UDC_EPIRQMSK_ADDR 0x418 /* Endpoint irq mask */
41 #define UDC_DEVLPM_ADDR 0x41C /* LPM control / status */
42 #define UDC_CSR_BUSY_ADDR 0x4f0 /* UDC_CSR_BUSY Status register */
43 #define UDC_SRST_ADDR 0x4fc /* SOFT RESET register */
44 #define UDC_CSR_ADDR 0x500 /* USB_DEVICE endpoint register */
45
46 /* Endpoint control register */
47 /* Bit position */
48 #define UDC_EPCTL_MRXFLUSH (1 << 12)
49 #define UDC_EPCTL_RRDY (1 << 9)
50 #define UDC_EPCTL_CNAK (1 << 8)
51 #define UDC_EPCTL_SNAK (1 << 7)
52 #define UDC_EPCTL_NAK (1 << 6)
53 #define UDC_EPCTL_P (1 << 3)
54 #define UDC_EPCTL_F (1 << 1)
55 #define UDC_EPCTL_S (1 << 0)
56 #define UDC_EPCTL_ET_SHIFT 4
57 /* Mask pattern */
58 #define UDC_EPCTL_ET_MASK 0x00000030
59 /* Value for ET field */
60 #define UDC_EPCTL_ET_CONTROL 0
61 #define UDC_EPCTL_ET_ISO 1
62 #define UDC_EPCTL_ET_BULK 2
63 #define UDC_EPCTL_ET_INTERRUPT 3
64
65 /* Endpoint status register */
66 /* Bit position */
67 #define UDC_EPSTS_XFERDONE (1 << 27)
68 #define UDC_EPSTS_RSS (1 << 26)
69 #define UDC_EPSTS_RCS (1 << 25)
70 #define UDC_EPSTS_TXEMPTY (1 << 24)
71 #define UDC_EPSTS_TDC (1 << 10)
72 #define UDC_EPSTS_HE (1 << 9)
73 #define UDC_EPSTS_MRXFIFO_EMP (1 << 8)
74 #define UDC_EPSTS_BNA (1 << 7)
75 #define UDC_EPSTS_IN (1 << 6)
76 #define UDC_EPSTS_OUT_SHIFT 4
77 /* Mask pattern */
78 #define UDC_EPSTS_OUT_MASK 0x00000030
79 #define UDC_EPSTS_ALL_CLR_MASK 0x1F0006F0
80 /* Value for OUT field */
81 #define UDC_EPSTS_OUT_SETUP 2
82 #define UDC_EPSTS_OUT_DATA 1
83
84 /* Device configuration register */
85 /* Bit position */
86 #define UDC_DEVCFG_CSR_PRG (1 << 17)
87 #define UDC_DEVCFG_SP (1 << 3)
88 /* SPD Value */
89 #define UDC_DEVCFG_SPD_HS 0x0
90 #define UDC_DEVCFG_SPD_FS 0x1
91 #define UDC_DEVCFG_SPD_LS 0x2
92
93 /* Device control register */
94 /* Bit position */
95 #define UDC_DEVCTL_THLEN_SHIFT 24
96 #define UDC_DEVCTL_BRLEN_SHIFT 16
97 #define UDC_DEVCTL_CSR_DONE (1 << 13)
98 #define UDC_DEVCTL_SD (1 << 10)
99 #define UDC_DEVCTL_MODE (1 << 9)
100 #define UDC_DEVCTL_BREN (1 << 8)
101 #define UDC_DEVCTL_THE (1 << 7)
102 #define UDC_DEVCTL_DU (1 << 4)
103 #define UDC_DEVCTL_TDE (1 << 3)
104 #define UDC_DEVCTL_RDE (1 << 2)
105 #define UDC_DEVCTL_RES (1 << 0)
106
107 /* Device status register */
108 /* Bit position */
109 #define UDC_DEVSTS_TS_SHIFT 18
110 #define UDC_DEVSTS_ENUM_SPEED_SHIFT 13
111 #define UDC_DEVSTS_ALT_SHIFT 8
112 #define UDC_DEVSTS_INTF_SHIFT 4
113 #define UDC_DEVSTS_CFG_SHIFT 0
114 /* Mask pattern */
115 #define UDC_DEVSTS_TS_MASK 0xfffc0000
116 #define UDC_DEVSTS_ENUM_SPEED_MASK 0x00006000
117 #define UDC_DEVSTS_ALT_MASK 0x00000f00
118 #define UDC_DEVSTS_INTF_MASK 0x000000f0
119 #define UDC_DEVSTS_CFG_MASK 0x0000000f
120 /* value for maximum speed for SPEED field */
121 #define UDC_DEVSTS_ENUM_SPEED_FULL 1
122 #define UDC_DEVSTS_ENUM_SPEED_HIGH 0
123 #define UDC_DEVSTS_ENUM_SPEED_LOW 2
124 #define UDC_DEVSTS_ENUM_SPEED_FULLX 3
125
126 /* Device irq register */
127 /* Bit position */
128 #define UDC_DEVINT_RWKP (1 << 7)
129 #define UDC_DEVINT_ENUM (1 << 6)
130 #define UDC_DEVINT_SOF (1 << 5)
131 #define UDC_DEVINT_US (1 << 4)
132 #define UDC_DEVINT_UR (1 << 3)
133 #define UDC_DEVINT_ES (1 << 2)
134 #define UDC_DEVINT_SI (1 << 1)
135 #define UDC_DEVINT_SC (1 << 0)
136 /* Mask pattern */
137 #define UDC_DEVINT_MSK 0x7f
138
139 /* Endpoint irq register */
140 /* Bit position */
141 #define UDC_EPINT_IN_SHIFT 0
142 #define UDC_EPINT_OUT_SHIFT 16
143 #define UDC_EPINT_IN_EP0 (1 << 0)
144 #define UDC_EPINT_OUT_EP0 (1 << 16)
145 /* Mask pattern */
146 #define UDC_EPINT_MSK_DISABLE_ALL 0xffffffff
147
148 /* UDC_CSR_BUSY Status register */
149 /* Bit position */
150 #define UDC_CSR_BUSY (1 << 0)
151
152 /* SOFT RESET register */
153 /* Bit position */
154 #define UDC_PSRST (1 << 1)
155 #define UDC_SRST (1 << 0)
156
157 /* USB_DEVICE endpoint register */
158 /* Bit position */
159 #define UDC_CSR_NE_NUM_SHIFT 0
160 #define UDC_CSR_NE_DIR_SHIFT 4
161 #define UDC_CSR_NE_TYPE_SHIFT 5
162 #define UDC_CSR_NE_CFG_SHIFT 7
163 #define UDC_CSR_NE_INTF_SHIFT 11
164 #define UDC_CSR_NE_ALT_SHIFT 15
165 #define UDC_CSR_NE_MAX_PKT_SHIFT 19
166 /* Mask pattern */
167 #define UDC_CSR_NE_NUM_MASK 0x0000000f
168 #define UDC_CSR_NE_DIR_MASK 0x00000010
169 #define UDC_CSR_NE_TYPE_MASK 0x00000060
170 #define UDC_CSR_NE_CFG_MASK 0x00000780
171 #define UDC_CSR_NE_INTF_MASK 0x00007800
172 #define UDC_CSR_NE_ALT_MASK 0x00078000
173 #define UDC_CSR_NE_MAX_PKT_MASK 0x3ff80000
174
175 #define PCH_UDC_CSR(ep) (UDC_CSR_ADDR + ep*4)
176 #define PCH_UDC_EPINT(in, num)\
177 (1 << (num + (in ? UDC_EPINT_IN_SHIFT : UDC_EPINT_OUT_SHIFT)))
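/*
 * Illustrative expansions (derived from the definitions above, not part of
 * the original source):
 *   PCH_UDC_CSR(2)      == UDC_CSR_ADDR + 8  == 0x508
 *   PCH_UDC_EPINT(1, 2) == 1 << 2            (IN  EP2 interrupt bit)
 *   PCH_UDC_EPINT(0, 2) == 1 << 18           (OUT EP2 interrupt bit)
 */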
178
179 /* Index of endpoint */
180 #define UDC_EP0IN_IDX 0
181 #define UDC_EP0OUT_IDX 1
182 #define UDC_EPIN_IDX(ep) (ep * 2)
183 #define UDC_EPOUT_IDX(ep) (ep * 2 + 1)
184 #define PCH_UDC_EP0 0
185 #define PCH_UDC_EP1 1
186 #define PCH_UDC_EP2 2
187 #define PCH_UDC_EP3 3
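/*
 * Example mapping (follows from the index macros above): physical EP1 uses
 * dev->ep[UDC_EPIN_IDX(1)] == dev->ep[2] for its IN direction and
 * dev->ep[UDC_EPOUT_IDX(1)] == dev->ep[3] for its OUT direction.
 */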
188
189 /* Number of endpoints */
190 #define PCH_UDC_EP_NUM 32 /* Total number of EPs (16 IN,16 OUT) */
191 #define PCH_UDC_USED_EP_NUM 4 /* Number of EPs actually used */
192 /* Length Value */
193 #define PCH_UDC_BRLEN 0x0F /* Burst length */
194 #define PCH_UDC_THLEN 0x1F /* Threshold length */
195 /* Value of EP Buffer Size */
196 #define UDC_EP0IN_BUFF_SIZE 16
197 #define UDC_EPIN_BUFF_SIZE 256
198 #define UDC_EP0OUT_BUFF_SIZE 16
199 #define UDC_EPOUT_BUFF_SIZE 256
200 /* Value of EP maximum packet size */
201 #define UDC_EP0IN_MAX_PKT_SIZE 64
202 #define UDC_EP0OUT_MAX_PKT_SIZE 64
203 #define UDC_BULK_MAX_PKT_SIZE 512
204
205 /* DMA */
206 #define DMA_DIR_RX 1 /* DMA for data receive */
207 #define DMA_DIR_TX 2 /* DMA for data transmit */
208 #define DMA_ADDR_INVALID (~(dma_addr_t)0)
209 #define UDC_DMA_MAXPACKET 65536 /* maximum packet size for DMA */
210
211 /**
212 * struct pch_udc_data_dma_desc - Structure to hold DMA descriptor information
213 * for data
214 * @status: Status quadlet
215 * @reserved: Reserved
216 * @dataptr: Buffer descriptor
217 * @next: Next descriptor
218 */
219 struct pch_udc_data_dma_desc {
220 u32 status;
221 u32 reserved;
222 u32 dataptr;
223 u32 next;
224 };
225
226 /**
227 * struct pch_udc_stp_dma_desc - Structure to hold DMA descriptor information
228 * for control data
229 * @status: Status
230 * @reserved: Reserved
231 * @request: Control Request
232 */
233 struct pch_udc_stp_dma_desc {
234 u32 status;
235 u32 reserved;
236 struct usb_ctrlrequest request;
237 } __attribute((packed));
238
239 /* DMA status definitions */
240 /* Buffer status */
241 #define PCH_UDC_BUFF_STS 0xC0000000
242 #define PCH_UDC_BS_HST_RDY 0x00000000
243 #define PCH_UDC_BS_DMA_BSY 0x40000000
244 #define PCH_UDC_BS_DMA_DONE 0x80000000
245 #define PCH_UDC_BS_HST_BSY 0xC0000000
246 /* Rx/Tx Status */
247 #define PCH_UDC_RXTX_STS 0x30000000
248 #define PCH_UDC_RTS_SUCC 0x00000000
249 #define PCH_UDC_RTS_DESERR 0x10000000
250 #define PCH_UDC_RTS_BUFERR 0x30000000
251 /* Last Descriptor Indication */
252 #define PCH_UDC_DMA_LAST 0x08000000
253 /* Number of Rx/Tx Bytes Mask */
254 #define PCH_UDC_RXTX_BYTES 0x0000ffff
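/*
 * Worked example (assembled from the masks above, for illustration only):
 * a completed last OUT descriptor that received 64 bytes reads back as
 *   status == PCH_UDC_BS_DMA_DONE | PCH_UDC_RTS_SUCC | PCH_UDC_DMA_LAST | 64
 *          == 0x88000040
 * and the received length is recovered with (status & PCH_UDC_RXTX_BYTES).
 */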
255
256 /**
257 * struct pch_udc_cfg_data - Structure to hold current configuration
258 * and interface information
259 * @cur_cfg: current configuration in use
260 * @cur_intf: current interface in use
261 * @cur_alt: current alt interface in use
262 */
263 struct pch_udc_cfg_data {
264 u16 cur_cfg;
265 u16 cur_intf;
266 u16 cur_alt;
267 };
268
269 /**
270 * struct pch_udc_ep - Structure holding a PCH USB device Endpoint information
271 * @ep: embedded ep request
272 * @td_stp_phys: for setup request
273 * @td_data_phys: for data request
274 * @td_stp: for setup request
275 * @td_data: for data request
276 * @dev: reference to device struct
277 * @offset_addr: offset address of ep register
278 * @desc: for this ep
279 * @queue: queue for requests
280 * @num: endpoint number
281 * @in: endpoint is IN
282 * @halted: endpoint halted?
283 * @epsts: Endpoint status
284 */
285 struct pch_udc_ep {
286 struct usb_ep ep;
287 dma_addr_t td_stp_phys;
288 dma_addr_t td_data_phys;
289 struct pch_udc_stp_dma_desc *td_stp;
290 struct pch_udc_data_dma_desc *td_data;
291 struct pch_udc_dev *dev;
292 unsigned long offset_addr;
293 struct list_head queue;
294 unsigned num:5,
295 in:1,
296 halted:1;
297 unsigned long epsts;
298 };
299
300 /**
301 * struct pch_vbus_gpio_data - Structure holding GPIO information
302 * for detecting VBUS
303 * @port: gpio descriptor for the VBUS GPIO
304 * @intr: gpio interrupt number
305 * @irq_work_fall: Structure for WorkQueue
306 * @irq_work_rise: Structure for WorkQueue
307 */
308 struct pch_vbus_gpio_data {
309 struct gpio_desc *port;
310 int intr;
311 struct work_struct irq_work_fall;
312 struct work_struct irq_work_rise;
313 };
314
315 /**
316 * struct pch_udc_dev - Structure holding complete information
317 * of the PCH USB device
318 * @gadget: gadget driver data
319 * @driver: reference to gadget driver bound
320 * @pdev: reference to the PCI device
321 * @ep: array of endpoints
322 * @lock: protects all state
323 * @stall: stall requested
324 * @prot_stall: protocol stall requested
326 * @suspended: driver in suspended state
327 * @connected: gadget driver associated
328 * @vbus_session: required vbus_session state
329 * @set_cfg_not_acked: pending acknowledgement for setup
330 * @waiting_zlp_ack: pending acknowledgement for ZLP
331 * @data_requests: DMA pool for data requests
332 * @stp_requests: DMA pool for setup requests
333 * @dma_addr: DMA pool for received
334 * @setup_data: Received setup data
335 * @base_addr: for mapped device memory
336 * @cfg_data: current cfg, intf, and alt in use
337 * @vbus_gpio: GPIO information for detecting VBUS
338 */
339 struct pch_udc_dev {
340 struct usb_gadget gadget;
341 struct usb_gadget_driver *driver;
342 struct pci_dev *pdev;
343 struct pch_udc_ep ep[PCH_UDC_EP_NUM];
344 spinlock_t lock; /* protects all state */
345 unsigned
346 stall:1,
347 prot_stall:1,
348 suspended:1,
349 connected:1,
350 vbus_session:1,
351 set_cfg_not_acked:1,
352 waiting_zlp_ack:1;
353 struct dma_pool *data_requests;
354 struct dma_pool *stp_requests;
355 dma_addr_t dma_addr;
356 struct usb_ctrlrequest setup_data;
357 void __iomem *base_addr;
358 struct pch_udc_cfg_data cfg_data;
359 struct pch_vbus_gpio_data vbus_gpio;
360 };
361 #define to_pch_udc(g) (container_of((g), struct pch_udc_dev, gadget))
362
363 #define PCH_UDC_PCI_BAR_QUARK_X1000 0
364 #define PCH_UDC_PCI_BAR 1
365
366 #define PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC 0x0939
367 #define PCI_DEVICE_ID_INTEL_EG20T_UDC 0x8808
368
369 #define PCI_DEVICE_ID_ML7213_IOH_UDC 0x801D
370 #define PCI_DEVICE_ID_ML7831_IOH_UDC 0x8808
371
372 static const char ep0_string[] = "ep0in";
373 static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
374 static bool speed_fs;
375 module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
376 MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
377
378 /**
379 * struct pch_udc_request - Structure holding a PCH USB device request packet
380 * @req: embedded ep request
381 * @td_data_phys: phys. address
382 * @td_data: first dma desc. of chain
383 * @td_data_last: last dma desc. of chain
384 * @queue: associated queue
385 * @dma_going: DMA in progress for request
386 * @dma_mapped: DMA memory mapped for request
387 * @dma_done: DMA completed for request
388 * @chain_len: chain length
389 * @buf: Buffer memory for align adjustment
390 * @dma: DMA memory for align adjustment
391 */
392 struct pch_udc_request {
393 struct usb_request req;
394 dma_addr_t td_data_phys;
395 struct pch_udc_data_dma_desc *td_data;
396 struct pch_udc_data_dma_desc *td_data_last;
397 struct list_head queue;
398 unsigned dma_going:1,
399 dma_mapped:1,
400 dma_done:1;
401 unsigned chain_len;
402 void *buf;
403 dma_addr_t dma;
404 };
405
406 static inline u32 pch_udc_readl(struct pch_udc_dev *dev, unsigned long reg)
407 {
408 return ioread32(dev->base_addr + reg);
409 }
410
411 static inline void pch_udc_writel(struct pch_udc_dev *dev,
412 unsigned long val, unsigned long reg)
413 {
414 iowrite32(val, dev->base_addr + reg);
415 }
416
417 static inline void pch_udc_bit_set(struct pch_udc_dev *dev,
418 unsigned long reg,
419 unsigned long bitmask)
420 {
421 pch_udc_writel(dev, pch_udc_readl(dev, reg) | bitmask, reg);
422 }
423
424 static inline void pch_udc_bit_clr(struct pch_udc_dev *dev,
425 unsigned long reg,
426 unsigned long bitmask)
427 {
428 pch_udc_writel(dev, pch_udc_readl(dev, reg) & ~(bitmask), reg);
429 }
430
431 static inline u32 pch_udc_ep_readl(struct pch_udc_ep *ep, unsigned long reg)
432 {
433 return ioread32(ep->dev->base_addr + ep->offset_addr + reg);
434 }
435
436 static inline void pch_udc_ep_writel(struct pch_udc_ep *ep,
437 unsigned long val, unsigned long reg)
438 {
439 iowrite32(val, ep->dev->base_addr + ep->offset_addr + reg);
440 }
441
442 static inline void pch_udc_ep_bit_set(struct pch_udc_ep *ep,
443 unsigned long reg,
444 unsigned long bitmask)
445 {
446 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) | bitmask, reg);
447 }
448
449 static inline void pch_udc_ep_bit_clr(struct pch_udc_ep *ep,
450 unsigned long reg,
451 unsigned long bitmask)
452 {
453 pch_udc_ep_writel(ep, pch_udc_ep_readl(ep, reg) & ~(bitmask), reg);
454 }
455
456 /**
457 * pch_udc_csr_busy() - Wait till idle.
458 * @dev: Reference to pch_udc_dev structure
459 */
460 static void pch_udc_csr_busy(struct pch_udc_dev *dev)
461 {
462 unsigned int count = 200;
463
464 /* Wait till idle */
465 while ((pch_udc_readl(dev, UDC_CSR_BUSY_ADDR) & UDC_CSR_BUSY)
466 && --count)
467 cpu_relax();
468 if (!count)
469 dev_err(&dev->pdev->dev, "%s: wait error\n", __func__);
470 }
471
472 /**
473 * pch_udc_write_csr() - Write the command and status registers.
474 * @dev: Reference to pch_udc_dev structure
475 * @val: value to be written to CSR register
476 * @ep: end-point number
477 */
478 static void pch_udc_write_csr(struct pch_udc_dev *dev, unsigned long val,
479 unsigned int ep)
480 {
481 unsigned long reg = PCH_UDC_CSR(ep);
482
483 pch_udc_csr_busy(dev); /* Wait till idle */
484 pch_udc_writel(dev, val, reg);
485 pch_udc_csr_busy(dev); /* Wait till idle */
486 }
487
488 /**
489 * pch_udc_read_csr() - Read the command and status registers.
490 * @dev: Reference to pch_udc_dev structure
491 * @ep: end-point number
492 *
493 * Return codes: content of CSR register
494 */
495 static u32 pch_udc_read_csr(struct pch_udc_dev *dev, unsigned int ep)
496 {
497 unsigned long reg = PCH_UDC_CSR(ep);
498
499 pch_udc_csr_busy(dev); /* Wait till idle */
500 pch_udc_readl(dev, reg); /* Dummy read */
501 pch_udc_csr_busy(dev); /* Wait till idle */
502 return pch_udc_readl(dev, reg);
503 }
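/*
 * Usage sketch (illustrative only): the UDC_CSR window needs the busy-wait
 * and dummy-read dance implemented above, so callers always go through the
 * helpers, e.g.
 *
 *	u32 ne = pch_udc_read_csr(dev, UDC_EPIN_IDX(1));
 *	pch_udc_write_csr(dev, ne, UDC_EPIN_IDX(1));
 */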
504
505 /**
506 * pch_udc_rmt_wakeup() - Initiate for remote wakeup
507 * @dev: Reference to pch_udc_dev structure
508 */
509 static inline void pch_udc_rmt_wakeup(struct pch_udc_dev *dev)
510 {
511 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
512 mdelay(1);
513 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
514 }
515
516 /**
517 * pch_udc_get_frame() - Get the current frame from device status register
518 * @dev: Reference to pch_udc_dev structure
519 * Return: current frame
520 */
521 static inline int pch_udc_get_frame(struct pch_udc_dev *dev)
522 {
523 u32 frame = pch_udc_readl(dev, UDC_DEVSTS_ADDR);
524 return (frame & UDC_DEVSTS_TS_MASK) >> UDC_DEVSTS_TS_SHIFT;
525 }
526
527 /**
528 * pch_udc_clear_selfpowered() - Clear the self power control
529 * @dev: Reference to pch_udc_regs structure
530 */
531 static inline void pch_udc_clear_selfpowered(struct pch_udc_dev *dev)
532 {
533 pch_udc_bit_clr(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
534 }
535
536 /**
537 * pch_udc_set_selfpowered() - Set the self power control
538 * @dev: Reference to pch_udc_regs structure
539 */
540 static inline void pch_udc_set_selfpowered(struct pch_udc_dev *dev)
541 {
542 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_SP);
543 }
544
545 /**
546 * pch_udc_set_disconnect() - Set the disconnect status.
547 * @dev: Reference to pch_udc_regs structure
548 */
549 static inline void pch_udc_set_disconnect(struct pch_udc_dev *dev)
550 {
551 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
552 }
553
554 /**
555 * pch_udc_clear_disconnect() - Clear the disconnect status.
556 * @dev: Reference to pch_udc_regs structure
557 */
558 static void pch_udc_clear_disconnect(struct pch_udc_dev *dev)
559 {
560 /* Clear the disconnect */
561 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
562 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
563 mdelay(1);
564 /* Resume USB signalling */
565 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
566 }
567
568 /**
569 * pch_udc_reconnect() - This API initializes the usb device controller,
570 * and clears the disconnect status.
571 * @dev: Reference to pch_udc_regs structure
572 */
573 static void pch_udc_init(struct pch_udc_dev *dev);
574 static void pch_udc_reconnect(struct pch_udc_dev *dev)
575 {
576 pch_udc_init(dev);
577
578 /* enable device interrupts */
579 /* pch_udc_enable_interrupts() */
580 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR,
581 UDC_DEVINT_UR | UDC_DEVINT_ENUM);
582
583 /* Clear the disconnect */
584 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
585 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_SD);
586 mdelay(1);
587 /* Resume USB signalling */
588 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RES);
589 }
590
591 /**
592 * pch_udc_vbus_session() - Set or clear the disconnect status.
593 * @dev: Reference to pch_udc_regs structure
594 * @is_active: Parameter specifying the action
595 * 0: indicating VBUS power is ending
596 * !0: indicating VBUS power is starting
597 */
598 static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
599 int is_active)
600 {
601 unsigned long iflags;
602
603 spin_lock_irqsave(&dev->lock, iflags);
604 if (is_active) {
605 pch_udc_reconnect(dev);
606 dev->vbus_session = 1;
607 } else {
608 if (dev->driver && dev->driver->disconnect) {
609 spin_unlock_irqrestore(&dev->lock, iflags);
610 dev->driver->disconnect(&dev->gadget);
611 spin_lock_irqsave(&dev->lock, iflags);
612 }
613 pch_udc_set_disconnect(dev);
614 dev->vbus_session = 0;
615 }
616 spin_unlock_irqrestore(&dev->lock, iflags);
617 }
618
619 /**
620 * pch_udc_ep_set_stall() - Set the stall of endpoint
621 * @ep: Reference to structure of type pch_udc_ep_regs
622 */
623 static void pch_udc_ep_set_stall(struct pch_udc_ep *ep)
624 {
625 if (ep->in) {
626 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
627 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
628 } else {
629 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
630 }
631 }
632
633 /**
634 * pch_udc_ep_clear_stall() - Clear the stall of endpoint
635 * @ep: Reference to structure of type pch_udc_ep_regs
636 */
637 static inline void pch_udc_ep_clear_stall(struct pch_udc_ep *ep)
638 {
639 /* Clear the stall */
640 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_S);
641 /* Clear NAK by writing CNAK */
642 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
643 }
644
645 /**
646 * pch_udc_ep_set_trfr_type() - Set the transfer type of endpoint
647 * @ep: Reference to structure of type pch_udc_ep_regs
648 * @type: Type of endpoint
649 */
650 static inline void pch_udc_ep_set_trfr_type(struct pch_udc_ep *ep,
651 u8 type)
652 {
653 pch_udc_ep_writel(ep, ((type << UDC_EPCTL_ET_SHIFT) &
654 UDC_EPCTL_ET_MASK), UDC_EPCTL_ADDR);
655 }
656
657 /**
658 * pch_udc_ep_set_bufsz() - Set the maximum packet size for the endpoint
659 * @ep: Reference to structure of type pch_udc_ep_regs
660 * @buf_size: The buffer word size
661 * @ep_in: EP is IN
662 */
663 static void pch_udc_ep_set_bufsz(struct pch_udc_ep *ep,
664 u32 buf_size, u32 ep_in)
665 {
666 u32 data;
667 if (ep_in) {
668 data = pch_udc_ep_readl(ep, UDC_BUFIN_FRAMENUM_ADDR);
669 data = (data & 0xffff0000) | (buf_size & 0xffff);
670 pch_udc_ep_writel(ep, data, UDC_BUFIN_FRAMENUM_ADDR);
671 } else {
672 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
673 data = (buf_size << 16) | (data & 0xffff);
674 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
675 }
676 }
677
678 /**
679 * pch_udc_ep_set_maxpkt() - Set the Max packet size for the endpoint
680 * @ep: Reference to structure of type pch_udc_ep_regs
681 * @pkt_size: The packet byte size
682 */
683 static void pch_udc_ep_set_maxpkt(struct pch_udc_ep *ep, u32 pkt_size)
684 {
685 u32 data = pch_udc_ep_readl(ep, UDC_BUFOUT_MAXPKT_ADDR);
686 data = (data & 0xffff0000) | (pkt_size & 0xffff);
687 pch_udc_ep_writel(ep, data, UDC_BUFOUT_MAXPKT_ADDR);
688 }
689
690 /**
691 * pch_udc_ep_set_subptr() - Set the Setup buffer pointer for the endpoint
692 * @ep: Reference to structure of type pch_udc_ep_regs
693 * @addr: Address of the register
694 */
695 static inline void pch_udc_ep_set_subptr(struct pch_udc_ep *ep, u32 addr)
696 {
697 pch_udc_ep_writel(ep, addr, UDC_SUBPTR_ADDR);
698 }
699
700 /**
701 * pch_udc_ep_set_ddptr() - Set the Data descriptor pointer for the endpoint
702 * @ep: Reference to structure of type pch_udc_ep_regs
703 * @addr: Address of the register
704 */
705 static inline void pch_udc_ep_set_ddptr(struct pch_udc_ep *ep, u32 addr)
706 {
707 pch_udc_ep_writel(ep, addr, UDC_DESPTR_ADDR);
708 }
709
710 /**
711 * pch_udc_ep_set_pd() - Set the poll demand bit for the endpoint
712 * @ep: Reference to structure of type pch_udc_ep_regs
713 */
714 static inline void pch_udc_ep_set_pd(struct pch_udc_ep *ep)
715 {
716 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_P);
717 }
718
719 /**
720 * pch_udc_ep_set_rrdy() - Set the receive ready bit for the endpoint
721 * @ep: Reference to structure of type pch_udc_ep_regs
722 */
723 static inline void pch_udc_ep_set_rrdy(struct pch_udc_ep *ep)
724 {
725 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
726 }
727
728 /**
729 * pch_udc_ep_clear_rrdy() - Clear the receive ready bit for the endpoint
730 * @ep: Reference to structure of type pch_udc_ep_regs
731 */
732 static inline void pch_udc_ep_clear_rrdy(struct pch_udc_ep *ep)
733 {
734 pch_udc_ep_bit_clr(ep, UDC_EPCTL_ADDR, UDC_EPCTL_RRDY);
735 }
736
737 /**
738 * pch_udc_set_dma() - Set the 'TDE' or RDE bit of device control
739 * register depending on the direction specified
740 * @dev: Reference to structure of type pch_udc_regs
741 * @dir: whether Tx or Rx
742 * DMA_DIR_RX: Receive
743 * DMA_DIR_TX: Transmit
744 */
745 static inline void pch_udc_set_dma(struct pch_udc_dev *dev, int dir)
746 {
747 if (dir == DMA_DIR_RX)
748 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
749 else if (dir == DMA_DIR_TX)
750 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
751 }
752
753 /**
754 * pch_udc_clear_dma() - Clear the 'TDE' or RDE bit of device control
755 * register depending on the direction specified
756 * @dev: Reference to structure of type pch_udc_regs
757 * @dir: Whether Tx or Rx
758 * DMA_DIR_RX: Receive
759 * DMA_DIR_TX: Transmit
760 */
761 static inline void pch_udc_clear_dma(struct pch_udc_dev *dev, int dir)
762 {
763 if (dir == DMA_DIR_RX)
764 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_RDE);
765 else if (dir == DMA_DIR_TX)
766 pch_udc_bit_clr(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_TDE);
767 }
768
769 /**
770 * pch_udc_set_csr_done() - Set the device control register
771 * CSR done field (bit 13)
772 * @dev: reference to structure of type pch_udc_regs
773 */
774 static inline void pch_udc_set_csr_done(struct pch_udc_dev *dev)
775 {
776 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR, UDC_DEVCTL_CSR_DONE);
777 }
778
779 /**
780 * pch_udc_disable_interrupts() - Disables the specified interrupts
781 * @dev: Reference to structure of type pch_udc_regs
782 * @mask: Mask to disable interrupts
783 */
784 static inline void pch_udc_disable_interrupts(struct pch_udc_dev *dev,
785 u32 mask)
786 {
787 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, mask);
788 }
789
790 /**
791 * pch_udc_enable_interrupts() - Enable the specified interrupts
792 * @dev: Reference to structure of type pch_udc_regs
793 * @mask: Mask to enable interrupts
794 */
795 static inline void pch_udc_enable_interrupts(struct pch_udc_dev *dev,
796 u32 mask)
797 {
798 pch_udc_bit_clr(dev, UDC_DEVIRQMSK_ADDR, mask);
799 }
800
801 /**
802 * pch_udc_disable_ep_interrupts() - Disable endpoint interrupts
803 * @dev: Reference to structure of type pch_udc_regs
804 * @mask: Mask to disable interrupts
805 */
806 static inline void pch_udc_disable_ep_interrupts(struct pch_udc_dev *dev,
807 u32 mask)
808 {
809 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, mask);
810 }
811
812 /**
813 * pch_udc_enable_ep_interrupts() - Enable endpoint interrupts
814 * @dev: Reference to structure of type pch_udc_regs
815 * @mask: Mask to enable interrupts
816 */
817 static inline void pch_udc_enable_ep_interrupts(struct pch_udc_dev *dev,
818 u32 mask)
819 {
820 pch_udc_bit_clr(dev, UDC_EPIRQMSK_ADDR, mask);
821 }
822
823 /**
824 * pch_udc_read_device_interrupts() - Read the device interrupts
825 * @dev: Reference to structure of type pch_udc_regs
826 * Return: The device interrupts
827 */
828 static inline u32 pch_udc_read_device_interrupts(struct pch_udc_dev *dev)
829 {
830 return pch_udc_readl(dev, UDC_DEVIRQSTS_ADDR);
831 }
832
833 /**
834 * pch_udc_write_device_interrupts() - Write device interrupts
835 * @dev: Reference to structure of type pch_udc_regs
836 * @val: The value to be written to interrupt register
837 */
838 static inline void pch_udc_write_device_interrupts(struct pch_udc_dev *dev,
839 u32 val)
840 {
841 pch_udc_writel(dev, val, UDC_DEVIRQSTS_ADDR);
842 }
843
844 /**
845 * pch_udc_read_ep_interrupts() - Read the endpoint interrupts
846 * @dev: Reference to structure of type pch_udc_regs
847 * Return: The endpoint interrupts
848 */
849 static inline u32 pch_udc_read_ep_interrupts(struct pch_udc_dev *dev)
850 {
851 return pch_udc_readl(dev, UDC_EPIRQSTS_ADDR);
852 }
853
854 /**
855 * pch_udc_write_ep_interrupts() - Clear endpoint interrupts
856 * @dev: Reference to structure of type pch_udc_regs
857 * @val: The value to be written to interrupt register
858 */
859 static inline void pch_udc_write_ep_interrupts(struct pch_udc_dev *dev,
860 u32 val)
861 {
862 pch_udc_writel(dev, val, UDC_EPIRQSTS_ADDR);
863 }
864
865 /**
866 * pch_udc_read_device_status() - Read the device status
867 * @dev: Reference to structure of type pch_udc_regs
868 * Return: The device status
869 */
870 static inline u32 pch_udc_read_device_status(struct pch_udc_dev *dev)
871 {
872 return pch_udc_readl(dev, UDC_DEVSTS_ADDR);
873 }
874
875 /**
876 * pch_udc_read_ep_control() - Read the endpoint control
877 * @ep: Reference to structure of type pch_udc_ep_regs
878 * Return: The endpoint control register value
879 */
880 static inline u32 pch_udc_read_ep_control(struct pch_udc_ep *ep)
881 {
882 return pch_udc_ep_readl(ep, UDC_EPCTL_ADDR);
883 }
884
885 /**
886 * pch_udc_clear_ep_control() - Clear the endpoint control register
887 * @ep: Reference to structure of type pch_udc_ep_regs
888 * Writes zero to the endpoint control register
889 */
890 static inline void pch_udc_clear_ep_control(struct pch_udc_ep *ep)
891 {
892 return pch_udc_ep_writel(ep, 0, UDC_EPCTL_ADDR);
893 }
894
895 /**
896 * pch_udc_read_ep_status() - Read the endpoint status
897 * @ep: Reference to structure of type pch_udc_ep_regs
898 * Return: The endpoint status
899 */
900 static inline u32 pch_udc_read_ep_status(struct pch_udc_ep *ep)
901 {
902 return pch_udc_ep_readl(ep, UDC_EPSTS_ADDR);
903 }
904
905 /**
906 * pch_udc_clear_ep_status() - Clear the endpoint status
907 * @ep: Reference to structure of type pch_udc_ep_regs
908 * @stat: Endpoint status
909 */
910 static inline void pch_udc_clear_ep_status(struct pch_udc_ep *ep,
911 u32 stat)
912 {
913 return pch_udc_ep_writel(ep, stat, UDC_EPSTS_ADDR);
914 }
915
916 /**
917 * pch_udc_ep_set_nak() - Set the bit 7 (SNAK field)
918 * of the endpoint control register
919 * @ep: Reference to structure of type pch_udc_ep_regs
920 */
921 static inline void pch_udc_ep_set_nak(struct pch_udc_ep *ep)
922 {
923 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_SNAK);
924 }
925
926 /**
927 * pch_udc_ep_clear_nak() - Set the bit 8 (CNAK field)
928 * of the endpoint control register
929 * @ep: reference to structure of type pch_udc_ep_regs
930 */
931 static void pch_udc_ep_clear_nak(struct pch_udc_ep *ep)
932 {
933 unsigned int loopcnt = 0;
934 struct pch_udc_dev *dev = ep->dev;
935
936 if (!(pch_udc_ep_readl(ep, UDC_EPCTL_ADDR) & UDC_EPCTL_NAK))
937 return;
938 if (!ep->in) {
939 loopcnt = 10000;
940 while (!(pch_udc_read_ep_status(ep) & UDC_EPSTS_MRXFIFO_EMP) &&
941 --loopcnt)
942 udelay(5);
943 if (!loopcnt)
944 dev_err(&dev->pdev->dev, "%s: RxFIFO not Empty\n",
945 __func__);
946 }
947 loopcnt = 10000;
948 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_NAK) && --loopcnt) {
949 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_CNAK);
950 udelay(5);
951 }
952 if (!loopcnt)
953 dev_err(&dev->pdev->dev, "%s: Clear NAK not set for ep%d%s\n",
954 __func__, ep->num, (ep->in ? "in" : "out"));
955 }
956
957 /**
958 * pch_udc_ep_fifo_flush() - Flush the endpoint fifo
959 * @ep: reference to structure of type pch_udc_ep_regs
960 * @dir: direction of endpoint
961 * 0: endpoint is OUT
962 * !0: endpoint is IN
963 */
964 static void pch_udc_ep_fifo_flush(struct pch_udc_ep *ep, int dir)
965 {
966 if (dir) { /* IN ep */
967 pch_udc_ep_bit_set(ep, UDC_EPCTL_ADDR, UDC_EPCTL_F);
968 return;
969 }
970 }
971
972 /**
973 * pch_udc_ep_enable() - This API enables the endpoint
974 * @ep: reference to structure of type pch_udc_ep_regs
975 * @cfg: current configuration information
976 * @desc: endpoint descriptor
977 */
978 static void pch_udc_ep_enable(struct pch_udc_ep *ep,
979 struct pch_udc_cfg_data *cfg,
980 const struct usb_endpoint_descriptor *desc)
981 {
982 u32 val = 0;
983 u32 buff_size = 0;
984
985 pch_udc_ep_set_trfr_type(ep, desc->bmAttributes);
986 if (ep->in)
987 buff_size = UDC_EPIN_BUFF_SIZE;
988 else
989 buff_size = UDC_EPOUT_BUFF_SIZE;
990 pch_udc_ep_set_bufsz(ep, buff_size, ep->in);
991 pch_udc_ep_set_maxpkt(ep, usb_endpoint_maxp(desc));
992 pch_udc_ep_set_nak(ep);
993 pch_udc_ep_fifo_flush(ep, ep->in);
994 /* Configure the endpoint */
995 val = ep->num << UDC_CSR_NE_NUM_SHIFT | ep->in << UDC_CSR_NE_DIR_SHIFT |
996 ((desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) <<
997 UDC_CSR_NE_TYPE_SHIFT) |
998 (cfg->cur_cfg << UDC_CSR_NE_CFG_SHIFT) |
999 (cfg->cur_intf << UDC_CSR_NE_INTF_SHIFT) |
1000 (cfg->cur_alt << UDC_CSR_NE_ALT_SHIFT) |
1001 usb_endpoint_maxp(desc) << UDC_CSR_NE_MAX_PKT_SHIFT;
1002
1003 if (ep->in)
1004 pch_udc_write_csr(ep->dev, val, UDC_EPIN_IDX(ep->num));
1005 else
1006 pch_udc_write_csr(ep->dev, val, UDC_EPOUT_IDX(ep->num));
1007 }
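/*
 * Illustrative value for the CSR write above (not from the original source):
 * a bulk IN endpoint 1 with wMaxPacketSize 512 in configuration 1,
 * interface 0, alternate 0 packs into
 *   val = (1 << UDC_CSR_NE_NUM_SHIFT) | (1 << UDC_CSR_NE_DIR_SHIFT) |
 *	   (USB_ENDPOINT_XFER_BULK << UDC_CSR_NE_TYPE_SHIFT) |
 *	   (1 << UDC_CSR_NE_CFG_SHIFT) | (512 << UDC_CSR_NE_MAX_PKT_SHIFT)
 *	 == 0x100000d1
 */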
1008
1009 /**
1010 * pch_udc_ep_disable() - This API disables the endpoint
1011 * @ep: reference to structure of type pch_udc_ep_regs
1012 */
1013 static void pch_udc_ep_disable(struct pch_udc_ep *ep)
1014 {
1015 if (ep->in) {
1016 /* flush the fifo */
1017 pch_udc_ep_writel(ep, UDC_EPCTL_F, UDC_EPCTL_ADDR);
1018 /* set NAK */
1019 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1020 pch_udc_ep_bit_set(ep, UDC_EPSTS_ADDR, UDC_EPSTS_IN);
1021 } else {
1022 /* set NAK */
1023 pch_udc_ep_writel(ep, UDC_EPCTL_SNAK, UDC_EPCTL_ADDR);
1024 }
1025 /* reset desc pointer */
1026 pch_udc_ep_writel(ep, 0, UDC_DESPTR_ADDR);
1027 }
1028
1029 /**
1030 * pch_udc_wait_ep_stall() - Wait EP stall.
1031 * @ep: reference to structure of type pch_udc_ep_regs
1032 */
1033 static void pch_udc_wait_ep_stall(struct pch_udc_ep *ep)
1034 {
1035 unsigned int count = 10000;
1036
1037 /* Wait till idle */
1038 while ((pch_udc_read_ep_control(ep) & UDC_EPCTL_S) && --count)
1039 udelay(5);
1040 if (!count)
1041 dev_err(&ep->dev->pdev->dev, "%s: wait error\n", __func__);
1042 }
1043
1044 /**
1045 * pch_udc_init() - This API initializes the usb device controller
1046 * @dev: Reference to pch_udc_regs structure
1047 */
1048 static void pch_udc_init(struct pch_udc_dev *dev)
1049 {
1050 if (NULL == dev) {
1051 pr_err("%s: Invalid address\n", __func__);
1052 return;
1053 }
1054 /* Soft Reset and Reset PHY */
1055 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1056 pch_udc_writel(dev, UDC_SRST | UDC_PSRST, UDC_SRST_ADDR);
1057 mdelay(1);
1058 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
1059 pch_udc_writel(dev, 0x00, UDC_SRST_ADDR);
1060 mdelay(1);
1061 /* mask and clear all device interrupts */
1062 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1063 pch_udc_bit_set(dev, UDC_DEVIRQSTS_ADDR, UDC_DEVINT_MSK);
1064
1065 /* mask and clear all ep interrupts */
1066 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1067 pch_udc_bit_set(dev, UDC_EPIRQSTS_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1068
1069 /* enable dynamic CSR programming, self powered and device speed */
1070 if (speed_fs)
1071 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1072 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_FS);
1073 else /* default high speed */
1074 pch_udc_bit_set(dev, UDC_DEVCFG_ADDR, UDC_DEVCFG_CSR_PRG |
1075 UDC_DEVCFG_SP | UDC_DEVCFG_SPD_HS);
1076 pch_udc_bit_set(dev, UDC_DEVCTL_ADDR,
1077 (PCH_UDC_THLEN << UDC_DEVCTL_THLEN_SHIFT) |
1078 (PCH_UDC_BRLEN << UDC_DEVCTL_BRLEN_SHIFT) |
1079 UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
1080 UDC_DEVCTL_THE);
1081 }
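/*
 * For reference (computed from the constants above, illustrative only):
 * the value OR'ed into UDC_DEVCTL_ADDR at the end of pch_udc_init() is
 *   (0x1F << 24) | (0x0F << 16) | UDC_DEVCTL_MODE | UDC_DEVCTL_BREN |
 *   UDC_DEVCTL_THE == 0x1f0f0380
 */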
1082
1083 /**
1084 * pch_udc_exit() - This API exits the usb device controller
1085 * @dev: Reference to pch_udc_regs structure
1086 */
1087 static void pch_udc_exit(struct pch_udc_dev *dev)
1088 {
1089 /* mask all device interrupts */
1090 pch_udc_bit_set(dev, UDC_DEVIRQMSK_ADDR, UDC_DEVINT_MSK);
1091 /* mask all ep interrupts */
1092 pch_udc_bit_set(dev, UDC_EPIRQMSK_ADDR, UDC_EPINT_MSK_DISABLE_ALL);
1093 /* put device in disconnected state */
1094 pch_udc_set_disconnect(dev);
1095 }
1096
1097 /**
1098 * pch_udc_pcd_get_frame() - This API is invoked to get the current frame number
1099 * @gadget: Reference to the gadget driver
1100 *
1101 * Return codes:
1102 * 0: Success
1103 * -EINVAL: If the gadget passed is NULL
1104 */
1105 static int pch_udc_pcd_get_frame(struct usb_gadget *gadget)
1106 {
1107 struct pch_udc_dev *dev;
1108
1109 if (!gadget)
1110 return -EINVAL;
1111 dev = container_of(gadget, struct pch_udc_dev, gadget);
1112 return pch_udc_get_frame(dev);
1113 }
1114
1115 /**
1116 * pch_udc_pcd_wakeup() - This API is invoked to initiate a remote wakeup
1117 * @gadget: Reference to the gadget driver
1118 *
1119 * Return codes:
1120 * 0: Success
1121 * -EINVAL: If the gadget passed is NULL
1122 */
1123 static int pch_udc_pcd_wakeup(struct usb_gadget *gadget)
1124 {
1125 struct pch_udc_dev *dev;
1126 unsigned long flags;
1127
1128 if (!gadget)
1129 return -EINVAL;
1130 dev = container_of(gadget, struct pch_udc_dev, gadget);
1131 spin_lock_irqsave(&dev->lock, flags);
1132 pch_udc_rmt_wakeup(dev);
1133 spin_unlock_irqrestore(&dev->lock, flags);
1134 return 0;
1135 }
1136
1137 /**
1138 * pch_udc_pcd_selfpowered() - This API is invoked to specify whether the device
1139 * is self powered or not
1140 * @gadget: Reference to the gadget driver
1141 * @value: Specifies self powered or not
1142 *
1143 * Return codes:
1144 * 0: Success
1145 * -EINVAL: If the gadget passed is NULL
1146 */
1147 static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
1148 {
1149 struct pch_udc_dev *dev;
1150
1151 if (!gadget)
1152 return -EINVAL;
1153 gadget->is_selfpowered = (value != 0);
1154 dev = container_of(gadget, struct pch_udc_dev, gadget);
1155 if (value)
1156 pch_udc_set_selfpowered(dev);
1157 else
1158 pch_udc_clear_selfpowered(dev);
1159 return 0;
1160 }
1161
1162 /**
1163 * pch_udc_pcd_pullup() - This API is invoked to make the device
1164 * visible/invisible to the host
1165 * @gadget: Reference to the gadget driver
1166 * @is_on: Specifies whether the pull up is made active or inactive
1167 *
1168 * Return codes:
1169 * 0: Success
1170 * -EINVAL: If the gadget passed is NULL
1171 */
1172 static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
1173 {
1174 struct pch_udc_dev *dev;
1175 unsigned long iflags;
1176
1177 if (!gadget)
1178 return -EINVAL;
1179
1180 dev = container_of(gadget, struct pch_udc_dev, gadget);
1181
1182 spin_lock_irqsave(&dev->lock, iflags);
1183 if (is_on) {
1184 pch_udc_reconnect(dev);
1185 } else {
1186 if (dev->driver && dev->driver->disconnect) {
1187 spin_unlock_irqrestore(&dev->lock, iflags);
1188 dev->driver->disconnect(&dev->gadget);
1189 spin_lock_irqsave(&dev->lock, iflags);
1190 }
1191 pch_udc_set_disconnect(dev);
1192 }
1193 spin_unlock_irqrestore(&dev->lock, iflags);
1194
1195 return 0;
1196 }
1197
1198 /**
1199 * pch_udc_pcd_vbus_session() - This API is used by a driver for an external
1200 * transceiver (or GPIO) that
1201 * detects a VBUS power session starting/ending
1202 * @gadget: Reference to the gadget driver
1203 * @is_active: specifies whether the session is starting or ending
1204 *
1205 * Return codes:
1206 * 0: Success
1207 * -EINVAL: If the gadget passed is NULL
1208 */
1209 static int pch_udc_pcd_vbus_session(struct usb_gadget *gadget, int is_active)
1210 {
1211 struct pch_udc_dev *dev;
1212
1213 if (!gadget)
1214 return -EINVAL;
1215 dev = container_of(gadget, struct pch_udc_dev, gadget);
1216 pch_udc_vbus_session(dev, is_active);
1217 return 0;
1218 }
1219
1220 /**
1221 * pch_udc_pcd_vbus_draw() - This API is used by gadget drivers during
1222 * SET_CONFIGURATION calls to
1223 * specify how much power the device can consume
1224 * @gadget: Reference to the gadget driver
1225 * @mA: specifies the current limit in 2mA unit
1226 *
1227 * Return codes:
1228 * -EINVAL: If the gadget passed is NULL
1229 * -EOPNOTSUPP:
1230 */
1231 static int pch_udc_pcd_vbus_draw(struct usb_gadget *gadget, unsigned int mA)
1232 {
1233 return -EOPNOTSUPP;
1234 }
1235
1236 static int pch_udc_start(struct usb_gadget *g,
1237 struct usb_gadget_driver *driver);
1238 static int pch_udc_stop(struct usb_gadget *g);
1239
1240 static const struct usb_gadget_ops pch_udc_ops = {
1241 .get_frame = pch_udc_pcd_get_frame,
1242 .wakeup = pch_udc_pcd_wakeup,
1243 .set_selfpowered = pch_udc_pcd_selfpowered,
1244 .pullup = pch_udc_pcd_pullup,
1245 .vbus_session = pch_udc_pcd_vbus_session,
1246 .vbus_draw = pch_udc_pcd_vbus_draw,
1247 .udc_start = pch_udc_start,
1248 .udc_stop = pch_udc_stop,
1249 };
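/*
 * Note (general gadget framework behaviour, not specific to this driver):
 * the UDC core dispatches into these ops, e.g. usb_gadget_connect() ends up
 * in .pullup(gadget, 1) and a transceiver driver reporting VBUS lands in
 * .vbus_session().
 */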
1250
1251 /**
1252 * pch_vbus_gpio_get_value() - This API gets value of GPIO port as VBUS status.
1253 * @dev: Reference to the driver structure
1254 *
1255 * Return value:
1256 * 1: VBUS is high
1257 * 0: VBUS is low
1258 * -1: It is not possible to detect VBUS using GPIO
1259 */
1260 static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev)
1261 {
1262 int vbus = 0;
1263
1264 if (dev->vbus_gpio.port)
1265 vbus = gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0;
1266 else
1267 vbus = -1;
1268
1269 return vbus;
1270 }
1271
1272 /**
1273 * pch_vbus_gpio_work_fall() - This API keeps watch on VBUS becoming Low.
1274 * If VBUS is Low, disconnect is processed
1275 * @irq_work: Structure for WorkQueue
1276 *
1277 */
1278 static void pch_vbus_gpio_work_fall(struct work_struct *irq_work)
1279 {
1280 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1281 struct pch_vbus_gpio_data, irq_work_fall);
1282 struct pch_udc_dev *dev =
1283 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1284 int vbus_saved = -1;
1285 int vbus;
1286 int count;
1287
1288 if (!dev->vbus_gpio.port)
1289 return;
1290
1291 for (count = 0; count < (PCH_VBUS_PERIOD / PCH_VBUS_INTERVAL);
1292 count++) {
1293 vbus = pch_vbus_gpio_get_value(dev);
1294
1295 if ((vbus_saved == vbus) && (vbus == 0)) {
1296 dev_dbg(&dev->pdev->dev, "VBUS fell");
1297 if (dev->driver
1298 && dev->driver->disconnect) {
1299 dev->driver->disconnect(
1300 &dev->gadget);
1301 }
1302 if (dev->vbus_gpio.intr)
1303 pch_udc_init(dev);
1304 else
1305 pch_udc_reconnect(dev);
1306 return;
1307 }
1308 vbus_saved = vbus;
1309 mdelay(PCH_VBUS_INTERVAL);
1310 }
1311 }
1312
1313 /**
1314 * pch_vbus_gpio_work_rise() - This API checks whether VBUS is High.
1315 * If VBUS is High, connect is processed
1316 * @irq_work: Structure for WorkQueue
1317 *
1318 */
1319 static void pch_vbus_gpio_work_rise(struct work_struct *irq_work)
1320 {
1321 struct pch_vbus_gpio_data *vbus_gpio = container_of(irq_work,
1322 struct pch_vbus_gpio_data, irq_work_rise);
1323 struct pch_udc_dev *dev =
1324 container_of(vbus_gpio, struct pch_udc_dev, vbus_gpio);
1325 int vbus;
1326
1327 if (!dev->vbus_gpio.port)
1328 return;
1329
1330 mdelay(PCH_VBUS_INTERVAL);
1331 vbus = pch_vbus_gpio_get_value(dev);
1332
1333 if (vbus == 1) {
1334 dev_dbg(&dev->pdev->dev, "VBUS rose");
1335 pch_udc_reconnect(dev);
1336 return;
1337 }
1338 }
1339
1340 /**
1341 * pch_vbus_gpio_irq() - IRQ handler for GPIO interrupt for changing VBUS
1342 * @irq: Interrupt request number
1343 * @data: Reference to the device structure
1344 *
1345 * Return codes:
1346 * IRQ_HANDLED: the VBUS change was handled (work was scheduled)
1347 * IRQ_NONE: GPIO based VBUS detection is not in use
1348 */
1349 static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
1350 {
1351 struct pch_udc_dev *dev = (struct pch_udc_dev *)data;
1352
1353 if (!dev->vbus_gpio.port || !dev->vbus_gpio.intr)
1354 return IRQ_NONE;
1355
1356 if (pch_vbus_gpio_get_value(dev))
1357 schedule_work(&dev->vbus_gpio.irq_work_rise);
1358 else
1359 schedule_work(&dev->vbus_gpio.irq_work_fall);
1360
1361 return IRQ_HANDLED;
1362 }
1363
1364 static struct gpiod_lookup_table minnowboard_udc_gpios = {
1365 .dev_id = "0000:02:02.4",
1366 .table = {
1367 GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
1368 {}
1369 },
1370 };
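/*
 * On the MinnowBoard the lookup table above maps line 12 of the
 * "sch_gpio.33158" controller to PCI function "0000:02:02.4", so the
 * devm_gpiod_get_optional() call in pch_vbus_gpio_init() below can find the
 * VBUS GPIO without a connection ID.
 */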
1371
1372 static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
1373 {
1374 .ident = "MinnowBoard",
1375 .matches = {
1376 DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
1377 },
1378 .driver_data = &minnowboard_udc_gpios,
1379 },
1380 { }
1381 };
1382
1383 static void pch_vbus_gpio_remove_table(void *table)
1384 {
1385 gpiod_remove_lookup_table(table);
1386 }
1387
1388 static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
1389 {
1390 struct device *d = &dev->pdev->dev;
1391 const struct dmi_system_id *dmi;
1392
1393 dmi = dmi_first_match(pch_udc_gpio_dmi_table);
1394 if (!dmi)
1395 return 0;
1396
1397 gpiod_add_lookup_table(dmi->driver_data);
1398 return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
1399 }
1400
1401 /**
1402 * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
1403 * @dev: Reference to the driver structure
1404 *
1405 * Return codes:
1406 * 0: Success
1407 * -EINVAL: GPIO port is invalid or can't be initialized.
1408 */
1409 static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
1410 {
1411 struct device *d = &dev->pdev->dev;
1412 int err;
1413 int irq_num = 0;
1414 struct gpio_desc *gpiod;
1415
1416 dev->vbus_gpio.port = NULL;
1417 dev->vbus_gpio.intr = 0;
1418
1419 err = pch_vbus_gpio_add_table(dev);
1420 if (err)
1421 return err;
1422
1423 /* Retrieve the GPIO line from the USB gadget device */
1424 gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
1425 if (IS_ERR(gpiod))
1426 return PTR_ERR(gpiod);
1427 gpiod_set_consumer_name(gpiod, "pch_vbus");
1428
1429 dev->vbus_gpio.port = gpiod;
1430 INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall);
1431
1432 irq_num = gpiod_to_irq(gpiod);
1433 if (irq_num > 0) {
1434 irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH);
1435 err = request_irq(irq_num, pch_vbus_gpio_irq, 0,
1436 "vbus_detect", dev);
1437 if (!err) {
1438 dev->vbus_gpio.intr = irq_num;
1439 INIT_WORK(&dev->vbus_gpio.irq_work_rise,
1440 pch_vbus_gpio_work_rise);
1441 } else {
1442 pr_err("%s: can't request irq %d, err: %d\n",
1443 __func__, irq_num, err);
1444 }
1445 }
1446
1447 return 0;
1448 }
1449
1450 /**
1451 * pch_vbus_gpio_free() - This API frees resources of GPIO port
1452 * @dev: Reference to the driver structure
1453 */
1454 static void pch_vbus_gpio_free(struct pch_udc_dev *dev)
1455 {
1456 if (dev->vbus_gpio.intr)
1457 free_irq(dev->vbus_gpio.intr, dev);
1458 }
1459
1460 /**
1461 * complete_req() - This API is invoked from the driver when processing
1462 * of a request is complete
1463 * @ep: Reference to the endpoint structure
1464 * @req: Reference to the request structure
1465 * @status: Indicates the success/failure of completion
1466 */
1467 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
1468 int status)
1469 __releases(&dev->lock)
1470 __acquires(&dev->lock)
1471 {
1472 struct pch_udc_dev *dev;
1473 unsigned halted = ep->halted;
1474
1475 list_del_init(&req->queue);
1476
1477 /* set new status if pending */
1478 if (req->req.status == -EINPROGRESS)
1479 req->req.status = status;
1480 else
1481 status = req->req.status;
1482
1483 dev = ep->dev;
1484 if (req->dma_mapped) {
1485 if (req->dma == DMA_ADDR_INVALID) {
1486 if (ep->in)
1487 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1488 req->req.length,
1489 DMA_TO_DEVICE);
1490 else
1491 dma_unmap_single(&dev->pdev->dev, req->req.dma,
1492 req->req.length,
1493 DMA_FROM_DEVICE);
1494 req->req.dma = DMA_ADDR_INVALID;
1495 } else {
1496 if (ep->in)
1497 dma_unmap_single(&dev->pdev->dev, req->dma,
1498 req->req.length,
1499 DMA_TO_DEVICE);
1500 else {
1501 dma_unmap_single(&dev->pdev->dev, req->dma,
1502 req->req.length,
1503 DMA_FROM_DEVICE);
1504 memcpy(req->req.buf, req->buf, req->req.length);
1505 }
1506 kfree(req->buf);
1507 req->dma = DMA_ADDR_INVALID;
1508 }
1509 req->dma_mapped = 0;
1510 }
1511 ep->halted = 1;
1512 spin_unlock(&dev->lock);
1513 if (!ep->in)
1514 pch_udc_ep_clear_rrdy(ep);
1515 usb_gadget_giveback_request(&ep->ep, &req->req);
1516 spin_lock(&dev->lock);
1517 ep->halted = halted;
1518 }
1519
1520 /**
1521 * empty_req_queue() - This API empties the request queue of an endpoint
1522 * @ep: Reference to the endpoint structure
1523 */
1524 static void empty_req_queue(struct pch_udc_ep *ep)
1525 {
1526 struct pch_udc_request *req;
1527
1528 ep->halted = 1;
1529 while (!list_empty(&ep->queue)) {
1530 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
1531 complete_req(ep, req, -ESHUTDOWN); /* Remove from list */
1532 }
1533 }
1534
1535 /**
1536 * pch_udc_free_dma_chain() - This function frees the DMA chain created
1537 * for the request
1538 * @dev: Reference to the driver structure
1539 * @req: Reference to the request to be freed
1543 */
1544 static void pch_udc_free_dma_chain(struct pch_udc_dev *dev,
1545 struct pch_udc_request *req)
1546 {
1547 struct pch_udc_data_dma_desc *td = req->td_data;
1548 unsigned i = req->chain_len;
1549
1550 dma_addr_t addr2;
1551 dma_addr_t addr = (dma_addr_t)td->next;
1552 td->next = 0x00;
1553 for (; i > 1; --i) {
1554 /* do not free first desc., will be done by free for request */
1555 td = phys_to_virt(addr);
1556 addr2 = (dma_addr_t)td->next;
1557 dma_pool_free(dev->data_requests, td, addr);
1558 addr = addr2;
1559 }
1560 req->chain_len = 1;
1561 }
1562
1563 /**
1564 * pch_udc_create_dma_chain() - This function creates or reinitializes
1565 * a DMA chain
1566 * @ep: Reference to the endpoint structure
1567 * @req: Reference to the request
1568 * @buf_len: The buffer length
1569 * @gfp_flags: Flags to be used while mapping the data buffer
1570 *
1571 * Return codes:
1572 * 0: success,
1573 * -ENOMEM: dma_pool_alloc invocation fails
1574 */
1575 static int pch_udc_create_dma_chain(struct pch_udc_ep *ep,
1576 struct pch_udc_request *req,
1577 unsigned long buf_len,
1578 gfp_t gfp_flags)
1579 {
1580 struct pch_udc_data_dma_desc *td = req->td_data, *last;
1581 unsigned long bytes = req->req.length, i = 0;
1582 dma_addr_t dma_addr;
1583 unsigned len = 1;
1584
1585 if (req->chain_len > 1)
1586 pch_udc_free_dma_chain(ep->dev, req);
1587
1588 if (req->dma == DMA_ADDR_INVALID)
1589 td->dataptr = req->req.dma;
1590 else
1591 td->dataptr = req->dma;
1592
1593 td->status = PCH_UDC_BS_HST_BSY;
1594 for (; ; bytes -= buf_len, ++len) {
1595 td->status = PCH_UDC_BS_HST_BSY | min(buf_len, bytes);
1596 if (bytes <= buf_len)
1597 break;
1598 last = td;
1599 td = dma_pool_alloc(ep->dev->data_requests, gfp_flags,
1600 &dma_addr);
1601 if (!td)
1602 goto nomem;
1603 i += buf_len;
1604 td->dataptr = req->td_data->dataptr + i;
1605 last->next = dma_addr;
1606 }
1607
1608 req->td_data_last = td;
1609 td->status |= PCH_UDC_DMA_LAST;
1610 td->next = req->td_data_phys;
1611 req->chain_len = len;
1612 return 0;
1613
1614 nomem:
1615 if (len > 1) {
1616 req->chain_len = len;
1617 pch_udc_free_dma_chain(ep->dev, req);
1618 }
1619 req->chain_len = 1;
1620 return -ENOMEM;
1621 }
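/*
 * Worked example (illustrative, not from the original source): for an OUT
 * request with req.length == 1000 bytes and buf_len == ep->ep.maxpacket ==
 * 512, the loop above builds a two-descriptor chain:
 *   td[0].status = PCH_UDC_BS_HST_BSY | 512
 *   td[1].status = PCH_UDC_BS_HST_BSY | 488, later OR'ed with PCH_UDC_DMA_LAST
 * with td[1].next pointing back at req->td_data_phys and chain_len == 2.
 */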
1622
1623 /**
1624 * prepare_dma() - This function creates and initializes the DMA chain
1625 * for the request
1626 * @ep: Reference to the endpoint structure
1627 * @req: Reference to the request
1628 * @gfp: Flag to be used while mapping the data buffer
1629 *
1630 * Return codes:
1631 * 0: Success
1632 * Otherwise: Linux error number on failure
1633 */
1634 static int prepare_dma(struct pch_udc_ep *ep, struct pch_udc_request *req,
1635 gfp_t gfp)
1636 {
1637 int retval;
1638
1639 /* Allocate and create a DMA chain */
1640 retval = pch_udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
1641 if (retval) {
1642 pr_err("%s: could not create DMA chain:%d\n", __func__, retval);
1643 return retval;
1644 }
1645 if (ep->in)
1646 req->td_data->status = (req->td_data->status &
1647 ~PCH_UDC_BUFF_STS) | PCH_UDC_BS_HST_RDY;
1648 return 0;
1649 }
1650
1651 /**
1652 * process_zlp() - This function processes zero length packets
1653 * from the gadget driver
1654 * @ep: Reference to the endpoint structure
1655 * @req: Reference to the request
1656 */
1657 static void process_zlp(struct pch_udc_ep *ep, struct pch_udc_request *req)
1658 {
1659 struct pch_udc_dev *dev = ep->dev;
1660
1661 /* IN zlp's are handled by hardware */
1662 complete_req(ep, req, 0);
1663
1664 /* if set_config or set_intf is waiting for ack by zlp
1665 * then set CSR_DONE
1666 */
1667 if (dev->set_cfg_not_acked) {
1668 pch_udc_set_csr_done(dev);
1669 dev->set_cfg_not_acked = 0;
1670 }
1671 /* setup command is ACK'ed now by zlp */
1672 if (!dev->stall && dev->waiting_zlp_ack) {
1673 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
1674 dev->waiting_zlp_ack = 0;
1675 }
1676 }
1677
1678 /**
1679 * pch_udc_start_rxrequest() - This function starts a receive request.
1680 * @ep: Reference to the endpoint structure
1681 * @req: Reference to the request structure
1682 */
1683 static void pch_udc_start_rxrequest(struct pch_udc_ep *ep,
1684 struct pch_udc_request *req)
1685 {
1686 struct pch_udc_data_dma_desc *td_data;
1687
1688 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
1689 td_data = req->td_data;
1690 /* Set the status bits for all descriptors */
1691 while (1) {
1692 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
1693 PCH_UDC_BS_HST_RDY;
1694 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
1695 break;
1696 td_data = phys_to_virt(td_data->next);
1697 }
1698 /* Write the descriptor pointer */
1699 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
1700 req->dma_going = 1;
1701 pch_udc_enable_ep_interrupts(ep->dev, UDC_EPINT_OUT_EP0 << ep->num);
1702 pch_udc_set_dma(ep->dev, DMA_DIR_RX);
1703 pch_udc_ep_clear_nak(ep);
1704 pch_udc_ep_set_rrdy(ep);
1705 }
1706
1707 /**
1708 * pch_udc_pcd_ep_enable() - This API enables the endpoint. It is called
1709 * from gadget driver
1710 * @usbep: Reference to the USB endpoint structure
1711 * @desc: Reference to the USB endpoint descriptor structure
1712 *
1713 * Return codes:
1714 * 0: Success
1715 * -EINVAL: Invalid argument (bad endpoint, descriptor, or max packet size)
1716 * -ESHUTDOWN: No gadget driver bound or device speed unknown
1717 */
1718 static int pch_udc_pcd_ep_enable(struct usb_ep *usbep,
1719 const struct usb_endpoint_descriptor *desc)
1720 {
1721 struct pch_udc_ep *ep;
1722 struct pch_udc_dev *dev;
1723 unsigned long iflags;
1724
1725 if (!usbep || (usbep->name == ep0_string) || !desc ||
1726 (desc->bDescriptorType != USB_DT_ENDPOINT) || !desc->wMaxPacketSize)
1727 return -EINVAL;
1728
1729 ep = container_of(usbep, struct pch_udc_ep, ep);
1730 dev = ep->dev;
1731 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1732 return -ESHUTDOWN;
1733 spin_lock_irqsave(&dev->lock, iflags);
1734 ep->ep.desc = desc;
1735 ep->halted = 0;
1736 pch_udc_ep_enable(ep, &ep->dev->cfg_data, desc);
1737 ep->ep.maxpacket = usb_endpoint_maxp(desc);
1738 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1739 spin_unlock_irqrestore(&dev->lock, iflags);
1740 return 0;
1741 }
1742
1743 /**
1744 * pch_udc_pcd_ep_disable() - This API disables endpoint and is called
1745 * from gadget driver
1746 * @usbep: Reference to the USB endpoint structure
1747 *
1748 * Return codes:
1749 * 0: Success
1750 * -EINVAL: Invalid argument (NULL endpoint, ep0, or endpoint not enabled)
1751 */
1752 static int pch_udc_pcd_ep_disable(struct usb_ep *usbep)
1753 {
1754 struct pch_udc_ep *ep;
1755 unsigned long iflags;
1756
1757 if (!usbep)
1758 return -EINVAL;
1759
1760 ep = container_of(usbep, struct pch_udc_ep, ep);
1761 if ((usbep->name == ep0_string) || !ep->ep.desc)
1762 return -EINVAL;
1763
1764 spin_lock_irqsave(&ep->dev->lock, iflags);
1765 empty_req_queue(ep);
1766 ep->halted = 1;
1767 pch_udc_ep_disable(ep);
1768 pch_udc_disable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
1769 ep->ep.desc = NULL;
1770 INIT_LIST_HEAD(&ep->queue);
1771 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1772 return 0;
1773 }
1774
1775 /**
1776 * pch_udc_alloc_request() - This function allocates request structure.
1777 * It is called by gadget driver
1778 * @usbep: Reference to the USB endpoint structure
1779 * @gfp: Flag to be used while allocating memory
1780 *
1781 * Return codes:
1782 * NULL: Failure
1783 * Allocated address: Success
1784 */
1785 static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
1786 gfp_t gfp)
1787 {
1788 struct pch_udc_request *req;
1789 struct pch_udc_ep *ep;
1790 struct pch_udc_data_dma_desc *dma_desc;
1791
1792 if (!usbep)
1793 return NULL;
1794 ep = container_of(usbep, struct pch_udc_ep, ep);
1795 req = kzalloc(sizeof *req, gfp);
1796 if (!req)
1797 return NULL;
1798 req->req.dma = DMA_ADDR_INVALID;
1799 req->dma = DMA_ADDR_INVALID;
1800 INIT_LIST_HEAD(&req->queue);
1801 if (!ep->dev->dma_addr)
1802 return &req->req;
1803 /* ep0 in requests are allocated from data pool here */
1804 dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
1805 &req->td_data_phys);
1806 if (NULL == dma_desc) {
1807 kfree(req);
1808 return NULL;
1809 }
1810 /* prevent the hardware from using the descriptor yet - set HOST BUSY */
1811 dma_desc->status |= PCH_UDC_BS_HST_BSY;
1812 dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
1813 req->td_data = dma_desc;
1814 req->td_data_last = dma_desc;
1815 req->chain_len = 1;
1816 return &req->req;
1817 }
1818
1819 /**
1820 * pch_udc_free_request() - This function frees request structure.
1821 * It is called by gadget driver
1822 * @usbep: Reference to the USB endpoint structure
1823 * @usbreq: Reference to the USB request
1824 */
1825 static void pch_udc_free_request(struct usb_ep *usbep,
1826 struct usb_request *usbreq)
1827 {
1828 struct pch_udc_ep *ep;
1829 struct pch_udc_request *req;
1830 struct pch_udc_dev *dev;
1831
1832 if (!usbep || !usbreq)
1833 return;
1834 ep = container_of(usbep, struct pch_udc_ep, ep);
1835 req = container_of(usbreq, struct pch_udc_request, req);
1836 dev = ep->dev;
1837 if (!list_empty(&req->queue))
1838 dev_err(&dev->pdev->dev, "%s: %s req=0x%p queue not empty\n",
1839 __func__, usbep->name, req);
1840 if (req->td_data != NULL) {
1841 if (req->chain_len > 1)
1842 pch_udc_free_dma_chain(ep->dev, req);
1843 dma_pool_free(ep->dev->data_requests, req->td_data,
1844 req->td_data_phys);
1845 }
1846 kfree(req);
1847 }
1848
1849 /**
1850 * pch_udc_pcd_queue() - This function queues a request packet. It is called
1851 * by gadget driver
1852 * @usbep: Reference to the USB endpoint structure
1853 * @usbreq: Reference to the USB request
1854 * @gfp: Flag to be used while mapping the data buffer
1855 *
1856 * Return codes:
1857 * 0: Success
1858 * linux error number: Failure
1859 */
1860 static int pch_udc_pcd_queue(struct usb_ep *usbep, struct usb_request *usbreq,
1861 gfp_t gfp)
1862 {
1863 int retval = 0;
1864 struct pch_udc_ep *ep;
1865 struct pch_udc_dev *dev;
1866 struct pch_udc_request *req;
1867 unsigned long iflags;
1868
1869 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf)
1870 return -EINVAL;
1871 ep = container_of(usbep, struct pch_udc_ep, ep);
1872 dev = ep->dev;
1873 if (!ep->ep.desc && ep->num)
1874 return -EINVAL;
1875 req = container_of(usbreq, struct pch_udc_request, req);
1876 if (!list_empty(&req->queue))
1877 return -EINVAL;
1878 if (!dev->driver || (dev->gadget.speed == USB_SPEED_UNKNOWN))
1879 return -ESHUTDOWN;
1880 spin_lock_irqsave(&dev->lock, iflags);
1881 /* map the buffer for dma */
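	/*
	 * 4-byte-aligned buffers are DMA-mapped directly; unaligned buffers
	 * are copied into a driver-allocated bounce buffer first.
	 */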
1882 if (usbreq->length &&
1883 ((usbreq->dma == DMA_ADDR_INVALID) || !usbreq->dma)) {
1884 if (!((unsigned long)(usbreq->buf) & 0x03)) {
1885 if (ep->in)
1886 usbreq->dma = dma_map_single(&dev->pdev->dev,
1887 usbreq->buf,
1888 usbreq->length,
1889 DMA_TO_DEVICE);
1890 else
1891 usbreq->dma = dma_map_single(&dev->pdev->dev,
1892 usbreq->buf,
1893 usbreq->length,
1894 DMA_FROM_DEVICE);
1895 } else {
1896 req->buf = kzalloc(usbreq->length, GFP_ATOMIC);
1897 if (!req->buf) {
1898 retval = -ENOMEM;
1899 goto probe_end;
1900 }
1901 if (ep->in) {
1902 memcpy(req->buf, usbreq->buf, usbreq->length);
1903 req->dma = dma_map_single(&dev->pdev->dev,
1904 req->buf,
1905 usbreq->length,
1906 DMA_TO_DEVICE);
1907 } else
1908 req->dma = dma_map_single(&dev->pdev->dev,
1909 req->buf,
1910 usbreq->length,
1911 DMA_FROM_DEVICE);
1912 }
1913 req->dma_mapped = 1;
1914 }
1915 if (usbreq->length > 0) {
1916 retval = prepare_dma(ep, req, GFP_ATOMIC);
1917 if (retval)
1918 goto probe_end;
1919 }
1920 usbreq->actual = 0;
1921 usbreq->status = -EINPROGRESS;
1922 req->dma_done = 0;
1923 if (list_empty(&ep->queue) && !ep->halted) {
1924 /* no pending transfer, so start this req */
1925 if (!usbreq->length) {
1926 process_zlp(ep, req);
1927 retval = 0;
1928 goto probe_end;
1929 }
1930 if (!ep->in) {
1931 pch_udc_start_rxrequest(ep, req);
1932 } else {
1933 /*
1934 * For IN transfers the descriptors will be programmed and
1935 * P bit will be set when
1936 * we get an IN token
1937 */
1938 pch_udc_wait_ep_stall(ep);
1939 pch_udc_ep_clear_nak(ep);
1940 pch_udc_enable_ep_interrupts(ep->dev, (1 << ep->num));
1941 }
1942 }
1943 /* Now add this request to the ep's pending requests */
1944 if (req != NULL)
1945 list_add_tail(&req->queue, &ep->queue);
1946
1947 probe_end:
1948 spin_unlock_irqrestore(&dev->lock, iflags);
1949 return retval;
1950 }
1951
1952 /**
1953 * pch_udc_pcd_dequeue() - This function de-queues a request packet.
1954 * It is called by gadget driver
1955 * @usbep: Reference to the USB endpoint structure
1956 * @usbreq: Reference to the USB request
1957 *
1958 * Return codes:
1959 * 0: Success
1960 * linux error number: Failure
1961 */
1962 static int pch_udc_pcd_dequeue(struct usb_ep *usbep,
1963 struct usb_request *usbreq)
1964 {
1965 struct pch_udc_ep *ep;
1966 struct pch_udc_request *req;
1967 unsigned long flags;
1968 int ret = -EINVAL;
1969
1970 ep = container_of(usbep, struct pch_udc_ep, ep);
1971 if (!usbep || !usbreq || (!ep->ep.desc && ep->num))
1972 return ret;
1973 req = container_of(usbreq, struct pch_udc_request, req);
1974 spin_lock_irqsave(&ep->dev->lock, flags);
1975 /* make sure it's still queued on this endpoint */
1976 list_for_each_entry(req, &ep->queue, queue) {
1977 if (&req->req == usbreq) {
1978 pch_udc_ep_set_nak(ep);
1979 if (!list_empty(&req->queue))
1980 complete_req(ep, req, -ECONNRESET);
1981 ret = 0;
1982 break;
1983 }
1984 }
1985 spin_unlock_irqrestore(&ep->dev->lock, flags);
1986 return ret;
1987 }
1988
1989 /**
1990 * pch_udc_pcd_set_halt() - This function sets or clears the endpoint halt
1991 * feature
1992 * @usbep: Reference to the USB endpoint structure
1993 * @halt: Specifies whether to set or clear the feature
1994 *
1995 * Return codes:
1996 * 0: Success
1997 * linux error number: Failure
1998 */
1999 static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
2000 {
2001 struct pch_udc_ep *ep;
2002 unsigned long iflags;
2003 int ret;
2004
2005 if (!usbep)
2006 return -EINVAL;
2007 ep = container_of(usbep, struct pch_udc_ep, ep);
2008 if (!ep->ep.desc && !ep->num)
2009 return -EINVAL;
2010 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2011 return -ESHUTDOWN;
2012 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2013 if (list_empty(&ep->queue)) {
2014 if (halt) {
2015 if (ep->num == PCH_UDC_EP0)
2016 ep->dev->stall = 1;
2017 pch_udc_ep_set_stall(ep);
2018 pch_udc_enable_ep_interrupts(
2019 ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2020 } else {
2021 pch_udc_ep_clear_stall(ep);
2022 }
2023 ret = 0;
2024 } else {
2025 ret = -EAGAIN;
2026 }
2027 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2028 return ret;
2029 }
2030
2031 /**
2032 * pch_udc_pcd_set_wedge() - This function sets the endpoint halt feature
2033 * and wedges (protocol-stalls) the endpoint
2034 * @usbep: Reference to the USB endpoint structure
2035 *
2036 * Return codes:
2037 * 0: Success
2038 * linux error number: Failure
2039 */
2040 static int pch_udc_pcd_set_wedge(struct usb_ep *usbep)
2041 {
2042 struct pch_udc_ep *ep;
2043 unsigned long iflags;
2044 int ret;
2045
2046 if (!usbep)
2047 return -EINVAL;
2048 ep = container_of(usbep, struct pch_udc_ep, ep);
2049 if (!ep->ep.desc && !ep->num)
2050 return -EINVAL;
2051 if (!ep->dev->driver || (ep->dev->gadget.speed == USB_SPEED_UNKNOWN))
2052 return -ESHUTDOWN;
2053 spin_lock_irqsave(&udc_stall_spinlock, iflags);
2054 if (!list_empty(&ep->queue)) {
2055 ret = -EAGAIN;
2056 } else {
2057 if (ep->num == PCH_UDC_EP0)
2058 ep->dev->stall = 1;
2059 pch_udc_ep_set_stall(ep);
2060 pch_udc_enable_ep_interrupts(ep->dev,
2061 PCH_UDC_EPINT(ep->in, ep->num));
2062 ep->dev->prot_stall = 1;
2063 ret = 0;
2064 }
2065 spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
2066 return ret;
2067 }
2068
2069 /**
2070 * pch_udc_pcd_fifo_flush() - This function flushes the FIFO of the specified endpoint
2071 * @usbep: Reference to the USB endpoint structure
2072 */
2073 static void pch_udc_pcd_fifo_flush(struct usb_ep *usbep)
2074 {
2075 struct pch_udc_ep *ep;
2076
2077 if (!usbep)
2078 return;
2079
2080 ep = container_of(usbep, struct pch_udc_ep, ep);
2081 if (ep->ep.desc || !ep->num)
2082 pch_udc_ep_fifo_flush(ep, ep->in);
2083 }
2084
2085 static const struct usb_ep_ops pch_udc_ep_ops = {
2086 .enable = pch_udc_pcd_ep_enable,
2087 .disable = pch_udc_pcd_ep_disable,
2088 .alloc_request = pch_udc_alloc_request,
2089 .free_request = pch_udc_free_request,
2090 .queue = pch_udc_pcd_queue,
2091 .dequeue = pch_udc_pcd_dequeue,
2092 .set_halt = pch_udc_pcd_set_halt,
2093 .set_wedge = pch_udc_pcd_set_wedge,
2094 .fifo_status = NULL,
2095 .fifo_flush = pch_udc_pcd_fifo_flush,
2096 };
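/*
 * These ops are not called directly by function drivers; the gadget core
 * dispatches to them through the generic usb_ep API. A rough sketch of how a
 * hypothetical function driver would end up here:
 *
 *	ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	ep->desc = &bulk_in_desc;
 *	usb_ep_enable(ep);                            -> pch_udc_pcd_ep_enable()
 *	req = usb_ep_alloc_request(ep, GFP_KERNEL);   -> pch_udc_alloc_request()
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;                  (hypothetical completion callback)
 *	usb_ep_queue(ep, req, GFP_ATOMIC);            -> pch_udc_pcd_queue()
 */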
2097
2098 /**
2099 * pch_udc_init_setup_buff() - This function initializes the SETUP buffer
2100 * @td_stp: Reference to the SETUP buffer structure
2101 */
2102 static void pch_udc_init_setup_buff(struct pch_udc_stp_dma_desc *td_stp)
2103 {
2104 static u32 pky_marker;
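	/* incrementing marker written into the reserved field, presumably for debugging */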
2105
2106 if (!td_stp)
2107 return;
2108 td_stp->reserved = ++pky_marker;
2109 memset(&td_stp->request, 0xFF, sizeof td_stp->request);
2110 td_stp->status = PCH_UDC_BS_HST_RDY;
2111 }
2112
2113 /**
2114 * pch_udc_start_next_txrequest() - This function starts
2115 * the next queued transmit (IN) request
2116 * @ep: Reference to the endpoint structure
2117 */
2118 static void pch_udc_start_next_txrequest(struct pch_udc_ep *ep)
2119 {
2120 struct pch_udc_request *req;
2121 struct pch_udc_data_dma_desc *td_data;
2122
2123 if (pch_udc_read_ep_control(ep) & UDC_EPCTL_P)
2124 return;
2125
2126 if (list_empty(&ep->queue))
2127 return;
2128
2129 /* next request */
2130 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2131 if (req->dma_going)
2132 return;
2133 if (!req->td_data)
2134 return;
2135 pch_udc_wait_ep_stall(ep);
2136 req->dma_going = 1;
2137 pch_udc_ep_set_ddptr(ep, 0);
2138 td_data = req->td_data;
2139 while (1) {
2140 td_data->status = (td_data->status & ~PCH_UDC_BUFF_STS) |
2141 PCH_UDC_BS_HST_RDY;
2142 if ((td_data->status & PCH_UDC_DMA_LAST) == PCH_UDC_DMA_LAST)
2143 break;
2144 td_data = phys_to_virt(td_data->next);
2145 }
2146 pch_udc_ep_set_ddptr(ep, req->td_data_phys);
2147 pch_udc_set_dma(ep->dev, DMA_DIR_TX);
2148 pch_udc_ep_set_pd(ep);
2149 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2150 pch_udc_ep_clear_nak(ep);
2151 }
2152
2153 /**
2154 * pch_udc_complete_transfer() - This function completes a transfer
2155 * @ep: Reference to the endpoint structure
2156 */
2157 static void pch_udc_complete_transfer(struct pch_udc_ep *ep)
2158 {
2159 struct pch_udc_request *req;
2160 struct pch_udc_dev *dev = ep->dev;
2161
2162 if (list_empty(&ep->queue))
2163 return;
2164 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2165 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2166 PCH_UDC_BS_DMA_DONE)
2167 return;
2168 if ((req->td_data_last->status & PCH_UDC_RXTX_STS) !=
2169 PCH_UDC_RTS_SUCC) {
2170 dev_err(&dev->pdev->dev, "Invalid RXTX status (0x%08x) "
2171 "epstatus=0x%08x\n",
2172 (req->td_data_last->status & PCH_UDC_RXTX_STS),
2173 (int)(ep->epsts));
2174 return;
2175 }
2176
2177 req->req.actual = req->req.length;
2178 req->td_data_last->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2179 req->td_data->status = PCH_UDC_BS_HST_BSY | PCH_UDC_DMA_LAST;
2180 complete_req(ep, req, 0);
2181 req->dma_going = 0;
2182 if (!list_empty(&ep->queue)) {
2183 pch_udc_wait_ep_stall(ep);
2184 pch_udc_ep_clear_nak(ep);
2185 pch_udc_enable_ep_interrupts(ep->dev,
2186 PCH_UDC_EPINT(ep->in, ep->num));
2187 } else {
2188 pch_udc_disable_ep_interrupts(ep->dev,
2189 PCH_UDC_EPINT(ep->in, ep->num));
2190 }
2191 }
2192
2193 /**
2194 * pch_udc_complete_receiver() - This function completes an OUT (receive) request
2195 * @ep: Reference to the endpoint structure
2196 */
2197 static void pch_udc_complete_receiver(struct pch_udc_ep *ep)
2198 {
2199 struct pch_udc_request *req;
2200 struct pch_udc_dev *dev = ep->dev;
2201 unsigned int count;
2202 struct pch_udc_data_dma_desc *td;
2203 dma_addr_t addr;
2204
2205 if (list_empty(&ep->queue))
2206 return;
2207 /* next request */
2208 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2209 pch_udc_clear_dma(ep->dev, DMA_DIR_RX);
2210 pch_udc_ep_set_ddptr(ep, 0);
2211 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) ==
2212 PCH_UDC_BS_DMA_DONE)
2213 td = req->td_data_last;
2214 else
2215 td = req->td_data;
2216
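	/*
	 * Walk the descriptor chain until the DMA_DONE + LAST descriptor is
	 * found; its low status bits carry the number of bytes received.
	 */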
2217 while (1) {
2218 if ((td->status & PCH_UDC_RXTX_STS) != PCH_UDC_RTS_SUCC) {
2219 dev_err(&dev->pdev->dev, "Invalid RXTX status=0x%08x "
2220 "epstatus=0x%08x\n",
2221 (td->status & PCH_UDC_RXTX_STS),
2222 (int)(ep->epsts));
2223 return;
2224 }
2225 if ((td->status & PCH_UDC_BUFF_STS) == PCH_UDC_BS_DMA_DONE)
2226 if (td->status & PCH_UDC_DMA_LAST) {
2227 count = td->status & PCH_UDC_RXTX_BYTES;
2228 break;
2229 }
2230 if (td == req->td_data_last) {
2231 dev_err(&dev->pdev->dev, "Incomplete RX descriptor\n");
2232 return;
2233 }
2234 addr = (dma_addr_t)td->next;
2235 td = phys_to_virt(addr);
2236 }
2237 /* on 64k packets the RXBYTES field is zero */
2238 if (!count && (req->req.length == UDC_DMA_MAXPACKET))
2239 count = UDC_DMA_MAXPACKET;
2240 req->td_data->status |= PCH_UDC_DMA_LAST;
2241 td->status |= PCH_UDC_BS_HST_BSY;
2242
2243 req->dma_going = 0;
2244 req->req.actual = count;
2245 complete_req(ep, req, 0);
2246 /* If more requests are queued, start the next one now */
2247 if (!list_empty(&ep->queue)) {
2248 req = list_entry(ep->queue.next, struct pch_udc_request, queue);
2249 pch_udc_start_rxrequest(ep, req);
2250 }
2251 }
2252
2253 /**
2254 * pch_udc_svc_data_in() - This function processes endpoint interrupts
2255 * for IN endpoints
2256 * @dev: Reference to the device structure
2257 * @ep_num: Endpoint that generated the interrupt
2258 */
2259 static void pch_udc_svc_data_in(struct pch_udc_dev *dev, int ep_num)
2260 {
2261 u32 epsts;
2262 struct pch_udc_ep *ep;
2263
2264 ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2265 epsts = ep->epsts;
2266 ep->epsts = 0;
2267
2268 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2269 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2270 UDC_EPSTS_RSS | UDC_EPSTS_XFERDONE)))
2271 return;
2272 if ((epsts & UDC_EPSTS_BNA))
2273 return;
2274 if (epsts & UDC_EPSTS_HE)
2275 return;
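	/* On RSS re-assert the stall; on RCS clear it unless a protocol stall is pending */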
2276 if (epsts & UDC_EPSTS_RSS) {
2277 pch_udc_ep_set_stall(ep);
2278 pch_udc_enable_ep_interrupts(ep->dev,
2279 PCH_UDC_EPINT(ep->in, ep->num));
2280 }
2281 if (epsts & UDC_EPSTS_RCS) {
2282 if (!dev->prot_stall) {
2283 pch_udc_ep_clear_stall(ep);
2284 } else {
2285 pch_udc_ep_set_stall(ep);
2286 pch_udc_enable_ep_interrupts(ep->dev,
2287 PCH_UDC_EPINT(ep->in, ep->num));
2288 }
2289 }
2290 if (epsts & UDC_EPSTS_TDC)
2291 pch_udc_complete_transfer(ep);
2292 /* On IN interrupt, provide data if we have any */
2293 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_RSS) &&
2294 !(epsts & UDC_EPSTS_TDC) && !(epsts & UDC_EPSTS_TXEMPTY))
2295 pch_udc_start_next_txrequest(ep);
2296 }
2297
2298 /**
2299 * pch_udc_svc_data_out() - Handles interrupts from OUT endpoint
2300 * @dev: Reference to the device structure
2301 * @ep_num: Endpoint that generated the interrupt
2302 */
2303 static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
2304 {
2305 u32 epsts;
2306 struct pch_udc_ep *ep;
2307 struct pch_udc_request *req = NULL;
2308
2309 ep = &dev->ep[UDC_EPOUT_IDX(ep_num)];
2310 epsts = ep->epsts;
2311 ep->epsts = 0;
2312
2313 if ((epsts & UDC_EPSTS_BNA) && (!list_empty(&ep->queue))) {
2314 /* next request */
2315 req = list_entry(ep->queue.next, struct pch_udc_request,
2316 queue);
2317 if ((req->td_data_last->status & PCH_UDC_BUFF_STS) !=
2318 PCH_UDC_BS_DMA_DONE) {
2319 if (!req->dma_going)
2320 pch_udc_start_rxrequest(ep, req);
2321 return;
2322 }
2323 }
2324 if (epsts & UDC_EPSTS_HE)
2325 return;
2326 if (epsts & UDC_EPSTS_RSS) {
2327 pch_udc_ep_set_stall(ep);
2328 pch_udc_enable_ep_interrupts(ep->dev,
2329 PCH_UDC_EPINT(ep->in, ep->num));
2330 }
2331 if (epsts & UDC_EPSTS_RCS) {
2332 if (!dev->prot_stall) {
2333 pch_udc_ep_clear_stall(ep);
2334 } else {
2335 pch_udc_ep_set_stall(ep);
2336 pch_udc_enable_ep_interrupts(ep->dev,
2337 PCH_UDC_EPINT(ep->in, ep->num));
2338 }
2339 }
2340 if (((epsts & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2341 UDC_EPSTS_OUT_DATA) {
2342 if (ep->dev->prot_stall == 1) {
2343 pch_udc_ep_set_stall(ep);
2344 pch_udc_enable_ep_interrupts(ep->dev,
2345 PCH_UDC_EPINT(ep->in, ep->num));
2346 } else {
2347 pch_udc_complete_receiver(ep);
2348 }
2349 }
2350 if (list_empty(&ep->queue))
2351 pch_udc_set_dma(dev, DMA_DIR_RX);
2352 }
2353
2354 static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
2355 __must_hold(&dev->lock)
2356 {
2357 int rc;
2358
2359 /* In some cases we can get an interrupt before the gadget driver is bound */
2360 if (!dev->driver)
2361 return -ESHUTDOWN;
2362
2363 spin_unlock(&dev->lock);
2364 rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
2365 spin_lock(&dev->lock);
2366 return rc;
2367 }
2368
2369 /**
2370 * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
2371 * @dev: Reference to the device structure
2372 */
2373 static void pch_udc_svc_control_in(struct pch_udc_dev *dev)
2374 {
2375 u32 epsts;
2376 struct pch_udc_ep *ep;
2377 struct pch_udc_ep *ep_out;
2378
2379 ep = &dev->ep[UDC_EP0IN_IDX];
2380 ep_out = &dev->ep[UDC_EP0OUT_IDX];
2381 epsts = ep->epsts;
2382 ep->epsts = 0;
2383
2384 if (!(epsts & (UDC_EPSTS_IN | UDC_EPSTS_BNA | UDC_EPSTS_HE |
2385 UDC_EPSTS_TDC | UDC_EPSTS_RCS | UDC_EPSTS_TXEMPTY |
2386 UDC_EPSTS_XFERDONE)))
2387 return;
2388 if ((epsts & UDC_EPSTS_BNA))
2389 return;
2390 if (epsts & UDC_EPSTS_HE)
2391 return;
2392 if ((epsts & UDC_EPSTS_TDC) && (!dev->stall)) {
2393 pch_udc_complete_transfer(ep);
2394 pch_udc_clear_dma(dev, DMA_DIR_RX);
2395 ep_out->td_data->status = (ep_out->td_data->status &
2396 ~PCH_UDC_BUFF_STS) |
2397 PCH_UDC_BS_HST_RDY;
2398 pch_udc_ep_clear_nak(ep_out);
2399 pch_udc_set_dma(dev, DMA_DIR_RX);
2400 pch_udc_ep_set_rrdy(ep_out);
2401 }
2402 /* On IN interrupt, provide data if we have any */
2403 if ((epsts & UDC_EPSTS_IN) && !(epsts & UDC_EPSTS_TDC) &&
2404 !(epsts & UDC_EPSTS_TXEMPTY))
2405 pch_udc_start_next_txrequest(ep);
2406 }
2407
2408 /**
2409 * pch_udc_svc_control_out() - Routine that handles Control
2410 * OUT endpoint interrupts
2411 * @dev: Reference to the device structure
2412 */
2413 static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
2414 __releases(&dev->lock)
2415 __acquires(&dev->lock)
2416 {
2417 u32 stat;
2418 int setup_supported;
2419 struct pch_udc_ep *ep;
2420
2421 ep = &dev->ep[UDC_EP0OUT_IDX];
2422 stat = ep->epsts;
2423 ep->epsts = 0;
2424
2425 /* If setup data */
2426 if (((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2427 UDC_EPSTS_OUT_SETUP) {
2428 dev->stall = 0;
2429 dev->ep[UDC_EP0IN_IDX].halted = 0;
2430 dev->ep[UDC_EP0OUT_IDX].halted = 0;
2431 dev->setup_data = ep->td_stp->request;
2432 pch_udc_init_setup_buff(ep->td_stp);
2433 pch_udc_clear_dma(dev, DMA_DIR_RX);
2434 pch_udc_ep_fifo_flush(&(dev->ep[UDC_EP0IN_IDX]),
2435 dev->ep[UDC_EP0IN_IDX].in);
2436 if ((dev->setup_data.bRequestType & USB_DIR_IN))
2437 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2438 else /* OUT */
2439 dev->gadget.ep0 = &ep->ep;
2440 /* Bulk-Only Mass Storage Reset (bRequestType 0x21, bRequest 0xFF) */
2441 if ((dev->setup_data.bRequestType == 0x21) &&
2442 (dev->setup_data.bRequest == 0xFF))
2443 dev->prot_stall = 0;
2444 /* call gadget with setup data received */
2445 setup_supported = pch_udc_gadget_setup(dev);
2446
2447 if (dev->setup_data.bRequestType & USB_DIR_IN) {
2448 ep->td_data->status = (ep->td_data->status &
2449 ~PCH_UDC_BUFF_STS) |
2450 PCH_UDC_BS_HST_RDY;
2451 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2452 }
2453 /* ep0 in returns data on IN phase */
2454 if (setup_supported >= 0 && setup_supported <
2455 UDC_EP0IN_MAX_PKT_SIZE) {
2456 pch_udc_ep_clear_nak(&(dev->ep[UDC_EP0IN_IDX]));
2457 /* Gadget would have queued a request when
2458 * we called the setup */
2459 if (!(dev->setup_data.bRequestType & USB_DIR_IN)) {
2460 pch_udc_set_dma(dev, DMA_DIR_RX);
2461 pch_udc_ep_clear_nak(ep);
2462 }
2463 } else if (setup_supported < 0) {
2464 /* if unsupported request, then stall */
2465 pch_udc_ep_set_stall(&(dev->ep[UDC_EP0IN_IDX]));
2466 pch_udc_enable_ep_interrupts(ep->dev,
2467 PCH_UDC_EPINT(ep->in, ep->num));
2468 dev->stall = 0;
2469 pch_udc_set_dma(dev, DMA_DIR_RX);
2470 } else {
2471 dev->waiting_zlp_ack = 1;
2472 }
2473 } else if ((((stat & UDC_EPSTS_OUT_MASK) >> UDC_EPSTS_OUT_SHIFT) ==
2474 UDC_EPSTS_OUT_DATA) && !dev->stall) {
2475 pch_udc_clear_dma(dev, DMA_DIR_RX);
2476 pch_udc_ep_set_ddptr(ep, 0);
2477 if (!list_empty(&ep->queue)) {
2478 ep->epsts = stat;
2479 pch_udc_svc_data_out(dev, PCH_UDC_EP0);
2480 }
2481 pch_udc_set_dma(dev, DMA_DIR_RX);
2482 }
2483 pch_udc_ep_set_rrdy(ep);
2484 }
2485
2486
2487 /**
2488 * pch_udc_postsvc_epinters() - This function enables endpoint interrupts
2489 * and clears NAK status
2490 * @dev: Reference to the device structure
2491 * @ep_num: End point number
2492 */
2493 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
2494 {
2495 struct pch_udc_ep *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
2496 if (list_empty(&ep->queue))
2497 return;
2498 pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
2499 pch_udc_ep_clear_nak(ep);
2500 }
2501
2502 /**
2503 * pch_udc_read_all_epstatus() - This function reads the status of all endpoints
2504 * @dev: Reference to the device structure
2505 * @ep_intr: Status of endpoint interrupt
2506 */
2507 static void pch_udc_read_all_epstatus(struct pch_udc_dev *dev, u32 ep_intr)
2508 {
2509 int i;
2510 struct pch_udc_ep *ep;
2511
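	/* IN endpoint interrupt bits occupy the low 16 bits, OUT bits the high 16 */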
2512 for (i = 0; i < PCH_UDC_USED_EP_NUM; i++) {
2513 /* IN */
2514 if (ep_intr & (0x1 << i)) {
2515 ep = &dev->ep[UDC_EPIN_IDX(i)];
2516 ep->epsts = pch_udc_read_ep_status(ep);
2517 pch_udc_clear_ep_status(ep, ep->epsts);
2518 }
2519 /* OUT */
2520 if (ep_intr & (0x10000 << i)) {
2521 ep = &dev->ep[UDC_EPOUT_IDX(i)];
2522 ep->epsts = pch_udc_read_ep_status(ep);
2523 pch_udc_clear_ep_status(ep, ep->epsts);
2524 }
2525 }
2526 }
2527
2528 /**
2529 * pch_udc_activate_control_ep() - This function enables the control endpoints
2530 * for traffic after a reset
2531 * @dev: Reference to the device structure
2532 */
2533 static void pch_udc_activate_control_ep(struct pch_udc_dev *dev)
2534 {
2535 struct pch_udc_ep *ep;
2536 u32 val;
2537
2538 /* Setup the IN endpoint */
2539 ep = &dev->ep[UDC_EP0IN_IDX];
2540 pch_udc_clear_ep_control(ep);
2541 pch_udc_ep_fifo_flush(ep, ep->in);
2542 pch_udc_ep_set_bufsz(ep, UDC_EP0IN_BUFF_SIZE, ep->in);
2543 pch_udc_ep_set_maxpkt(ep, UDC_EP0IN_MAX_PKT_SIZE);
2544 /* Initialize the IN EP Descriptor */
2545 ep->td_data = NULL;
2546 ep->td_stp = NULL;
2547 ep->td_data_phys = 0;
2548 ep->td_stp_phys = 0;
2549
2550 /* Setup the OUT endpoint */
2551 ep = &dev->ep[UDC_EP0OUT_IDX];
2552 pch_udc_clear_ep_control(ep);
2553 pch_udc_ep_fifo_flush(ep, ep->in);
2554 pch_udc_ep_set_bufsz(ep, UDC_EP0OUT_BUFF_SIZE, ep->in);
2555 pch_udc_ep_set_maxpkt(ep, UDC_EP0OUT_MAX_PKT_SIZE);
2556 val = UDC_EP0OUT_MAX_PKT_SIZE << UDC_CSR_NE_MAX_PKT_SHIFT;
2557 pch_udc_write_csr(ep->dev, val, UDC_EP0OUT_IDX);
2558
2559 /* Initialize the SETUP buffer */
2560 pch_udc_init_setup_buff(ep->td_stp);
2561 /* Write the pointer address of dma descriptor */
2562 pch_udc_ep_set_subptr(ep, ep->td_stp_phys);
2563 /* Write the pointer address of Setup descriptor */
2564 pch_udc_ep_set_ddptr(ep, ep->td_data_phys);
2565
2566 /* Initialize the dma descriptor */
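	/* (a single self-linked entry whose data pointer is the shared ep0out bounce buffer) */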
2567 ep->td_data->status = PCH_UDC_DMA_LAST;
2568 ep->td_data->dataptr = dev->dma_addr;
2569 ep->td_data->next = ep->td_data_phys;
2570
2571 pch_udc_ep_clear_nak(ep);
2572 }
2573
2574
2575 /**
2576 * pch_udc_svc_ur_interrupt() - This function handles a USB reset interrupt
2577 * @dev: Reference to driver structure
2578 */
2579 static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
2580 {
2581 struct pch_udc_ep *ep;
2582 int i;
2583
2584 pch_udc_clear_dma(dev, DMA_DIR_TX);
2585 pch_udc_clear_dma(dev, DMA_DIR_RX);
2586 /* Mask all endpoint interrupts */
2587 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2588 /* clear all endpoint interrupts */
2589 pch_udc_write_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
2590
2591 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2592 ep = &dev->ep[i];
2593 pch_udc_clear_ep_status(ep, UDC_EPSTS_ALL_CLR_MASK);
2594 pch_udc_clear_ep_control(ep);
2595 pch_udc_ep_set_ddptr(ep, 0);
2596 pch_udc_write_csr(ep->dev, 0x00, i);
2597 }
2598 dev->stall = 0;
2599 dev->prot_stall = 0;
2600 dev->waiting_zlp_ack = 0;
2601 dev->set_cfg_not_acked = 0;
2602
2603 /* NAK, flush, and empty the request queue of each used endpoint */
2604 for (i = 0; i < (PCH_UDC_USED_EP_NUM*2); i++) {
2605 ep = &dev->ep[i];
2606 pch_udc_ep_set_nak(ep);
2607 pch_udc_ep_fifo_flush(ep, ep->in);
2608 /* Complete request queue */
2609 empty_req_queue(ep);
2610 }
2611 if (dev->driver) {
2612 spin_unlock(&dev->lock);
2613 usb_gadget_udc_reset(&dev->gadget, dev->driver);
2614 spin_lock(&dev->lock);
2615 }
2616 }
2617
2618 /**
2619 * pch_udc_svc_enum_interrupt() - This function handles a USB speed enumeration
2620 * done interrupt
2621 * @dev: Reference to driver structure
2622 */
2623 static void pch_udc_svc_enum_interrupt(struct pch_udc_dev *dev)
2624 {
2625 u32 dev_stat, dev_speed;
2626 u32 speed = USB_SPEED_FULL;
2627
2628 dev_stat = pch_udc_read_device_status(dev);
2629 dev_speed = (dev_stat & UDC_DEVSTS_ENUM_SPEED_MASK) >>
2630 UDC_DEVSTS_ENUM_SPEED_SHIFT;
2631 switch (dev_speed) {
2632 case UDC_DEVSTS_ENUM_SPEED_HIGH:
2633 speed = USB_SPEED_HIGH;
2634 break;
2635 case UDC_DEVSTS_ENUM_SPEED_FULL:
2636 speed = USB_SPEED_FULL;
2637 break;
2638 case UDC_DEVSTS_ENUM_SPEED_LOW:
2639 speed = USB_SPEED_LOW;
2640 break;
2641 default:
2642 BUG();
2643 }
2644 dev->gadget.speed = speed;
2645 pch_udc_activate_control_ep(dev);
2646 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 | UDC_EPINT_OUT_EP0);
2647 pch_udc_set_dma(dev, DMA_DIR_TX);
2648 pch_udc_set_dma(dev, DMA_DIR_RX);
2649 pch_udc_ep_set_rrdy(&(dev->ep[UDC_EP0OUT_IDX]));
2650
2651 /* enable device interrupts */
2652 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2653 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2654 UDC_DEVINT_SI | UDC_DEVINT_SC);
2655 }
2656
2657 /**
2658 * pch_udc_svc_intf_interrupt() - This function handles a set interface
2659 * interrupt
2660 * @dev: Reference to driver structure
2661 */
2662 static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
2663 {
2664 u32 reg, dev_stat = 0;
2665 int i;
2666
2667 dev_stat = pch_udc_read_device_status(dev);
2668 dev->cfg_data.cur_intf = (dev_stat & UDC_DEVSTS_INTF_MASK) >>
2669 UDC_DEVSTS_INTF_SHIFT;
2670 dev->cfg_data.cur_alt = (dev_stat & UDC_DEVSTS_ALT_MASK) >>
2671 UDC_DEVSTS_ALT_SHIFT;
2672 dev->set_cfg_not_acked = 1;
2673 /* Construct the usb request for gadget driver and inform it */
2674 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2675 dev->setup_data.bRequest = USB_REQ_SET_INTERFACE;
2676 dev->setup_data.bRequestType = USB_RECIP_INTERFACE;
2677 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_alt);
2678 dev->setup_data.wIndex = cpu_to_le16(dev->cfg_data.cur_intf);
2679 /* program the Endpoint Cfg registers */
2680 /* Only one end point cfg register */
2681 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2682 reg = (reg & ~UDC_CSR_NE_INTF_MASK) |
2683 (dev->cfg_data.cur_intf << UDC_CSR_NE_INTF_SHIFT);
2684 reg = (reg & ~UDC_CSR_NE_ALT_MASK) |
2685 (dev->cfg_data.cur_alt << UDC_CSR_NE_ALT_SHIFT);
2686 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2687 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2688 /* clear stall bits */
2689 pch_udc_ep_clear_stall(&(dev->ep[i]));
2690 dev->ep[i].halted = 0;
2691 }
2692 dev->stall = 0;
2693 pch_udc_gadget_setup(dev);
2694 }
2695
2696 /**
2697 * pch_udc_svc_cfg_interrupt() - This function handles a set configuration
2698 * interrupt
2699 * @dev: Reference to driver structure
2700 */
2701 static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
2702 {
2703 int i;
2704 u32 reg, dev_stat = 0;
2705
2706 dev_stat = pch_udc_read_device_status(dev);
2707 dev->set_cfg_not_acked = 1;
2708 dev->cfg_data.cur_cfg = (dev_stat & UDC_DEVSTS_CFG_MASK) >>
2709 UDC_DEVSTS_CFG_SHIFT;
2710 /* make usb request for gadget driver */
2711 memset(&dev->setup_data, 0 , sizeof dev->setup_data);
2712 dev->setup_data.bRequest = USB_REQ_SET_CONFIGURATION;
2713 dev->setup_data.wValue = cpu_to_le16(dev->cfg_data.cur_cfg);
2714 /* program the NE registers */
2715 /* Only one end point cfg register */
2716 reg = pch_udc_read_csr(dev, UDC_EP0OUT_IDX);
2717 reg = (reg & ~UDC_CSR_NE_CFG_MASK) |
2718 (dev->cfg_data.cur_cfg << UDC_CSR_NE_CFG_SHIFT);
2719 pch_udc_write_csr(dev, reg, UDC_EP0OUT_IDX);
2720 for (i = 0; i < PCH_UDC_USED_EP_NUM * 2; i++) {
2721 /* clear stall bits */
2722 pch_udc_ep_clear_stall(&(dev->ep[i]));
2723 dev->ep[i].halted = 0;
2724 }
2725 dev->stall = 0;
2726
2727 /* call the gadget driver with the constructed setup data */
2728 pch_udc_gadget_setup(dev);
2729 }
2730
2731 /**
2732 * pch_udc_dev_isr() - This function services device interrupts
2733 * by invoking appropriate routines.
2734 * @dev: Reference to the device structure
2735 * @dev_intr: The Device interrupt status.
2736 */
2737 static void pch_udc_dev_isr(struct pch_udc_dev *dev, u32 dev_intr)
2738 {
2739 int vbus;
2740
2741 /* USB Reset Interrupt */
2742 if (dev_intr & UDC_DEVINT_UR) {
2743 pch_udc_svc_ur_interrupt(dev);
2744 dev_dbg(&dev->pdev->dev, "USB_RESET\n");
2745 }
2746 /* Enumeration Done Interrupt */
2747 if (dev_intr & UDC_DEVINT_ENUM) {
2748 pch_udc_svc_enum_interrupt(dev);
2749 dev_dbg(&dev->pdev->dev, "USB_ENUM\n");
2750 }
2751 /* Set Interface Interrupt */
2752 if (dev_intr & UDC_DEVINT_SI)
2753 pch_udc_svc_intf_interrupt(dev);
2754 /* Set Config Interrupt */
2755 if (dev_intr & UDC_DEVINT_SC)
2756 pch_udc_svc_cfg_interrupt(dev);
2757 /* USB Suspend interrupt */
2758 if (dev_intr & UDC_DEVINT_US) {
2759 if (dev->driver
2760 && dev->driver->suspend) {
2761 spin_unlock(&dev->lock);
2762 dev->driver->suspend(&dev->gadget);
2763 spin_lock(&dev->lock);
2764 }
2765
2766 vbus = pch_vbus_gpio_get_value(dev);
2767 if ((dev->vbus_session == 0)
2768 && (vbus != 1)) {
2769 if (dev->driver && dev->driver->disconnect) {
2770 spin_unlock(&dev->lock);
2771 dev->driver->disconnect(&dev->gadget);
2772 spin_lock(&dev->lock);
2773 }
2774 pch_udc_reconnect(dev);
2775 } else if ((dev->vbus_session == 0)
2776 && (vbus == 1)
2777 && !dev->vbus_gpio.intr)
2778 schedule_work(&dev->vbus_gpio.irq_work_fall);
2779
2780 dev_dbg(&dev->pdev->dev, "USB_SUSPEND\n");
2781 }
2782 /* Clear the SOF interrupt, if enabled */
2783 if (dev_intr & UDC_DEVINT_SOF)
2784 dev_dbg(&dev->pdev->dev, "SOF\n");
2785 /* ES interrupt, IDLE > 3ms on the USB */
2786 if (dev_intr & UDC_DEVINT_ES)
2787 dev_dbg(&dev->pdev->dev, "ES\n");
2788 /* RWKP interrupt */
2789 if (dev_intr & UDC_DEVINT_RWKP)
2790 dev_dbg(&dev->pdev->dev, "RWKP\n");
2791 }
2792
2793 /**
2794 * pch_udc_isr() - This function handles interrupts from the PCH USB Device
2795 * @irq: Interrupt request number
2796 * @pdev: Reference to the device structure
2797 */
2798 static irqreturn_t pch_udc_isr(int irq, void *pdev)
2799 {
2800 struct pch_udc_dev *dev = (struct pch_udc_dev *) pdev;
2801 u32 dev_intr, ep_intr;
2802 int i;
2803
2804 dev_intr = pch_udc_read_device_interrupts(dev);
2805 ep_intr = pch_udc_read_ep_interrupts(dev);
2806
2807 /* On hot plug, detect whether the controller has hung up. */
2808 if (dev_intr == ep_intr)
2809 if (dev_intr == pch_udc_readl(dev, UDC_DEVCFG_ADDR)) {
2810 dev_dbg(&dev->pdev->dev, "UDC: Hung up\n");
2811 /* The controller is reset */
2812 pch_udc_writel(dev, UDC_SRST, UDC_SRST_ADDR);
2813 return IRQ_HANDLED;
2814 }
2815 if (dev_intr)
2816 /* Clear device interrupts */
2817 pch_udc_write_device_interrupts(dev, dev_intr);
2818 if (ep_intr)
2819 /* Clear ep interrupts */
2820 pch_udc_write_ep_interrupts(dev, ep_intr);
2821 if (!dev_intr && !ep_intr)
2822 return IRQ_NONE;
2823 spin_lock(&dev->lock);
2824 if (dev_intr)
2825 pch_udc_dev_isr(dev, dev_intr);
2826 if (ep_intr) {
2827 pch_udc_read_all_epstatus(dev, ep_intr);
2828 /* Process Control In interrupts, if present */
2829 if (ep_intr & UDC_EPINT_IN_EP0) {
2830 pch_udc_svc_control_in(dev);
2831 pch_udc_postsvc_epinters(dev, 0);
2832 }
2833 /* Process Control Out interrupts, if present */
2834 if (ep_intr & UDC_EPINT_OUT_EP0)
2835 pch_udc_svc_control_out(dev);
2836 /* Process data in end point interrupts */
2837 for (i = 1; i < PCH_UDC_USED_EP_NUM; i++) {
2838 if (ep_intr & (1 << i)) {
2839 pch_udc_svc_data_in(dev, i);
2840 pch_udc_postsvc_epinters(dev, i);
2841 }
2842 }
2843 /* Process data out end point interrupts */
2844 for (i = UDC_EPINT_OUT_SHIFT + 1; i < (UDC_EPINT_OUT_SHIFT +
2845 PCH_UDC_USED_EP_NUM); i++)
2846 if (ep_intr & (1 << i))
2847 pch_udc_svc_data_out(dev, i -
2848 UDC_EPINT_OUT_SHIFT);
2849 }
2850 spin_unlock(&dev->lock);
2851 return IRQ_HANDLED;
2852 }
2853
2854 /**
2855 * pch_udc_setup_ep0() - This function enables the control endpoint for traffic
2856 * @dev: Reference to the device structure
2857 */
2858 static void pch_udc_setup_ep0(struct pch_udc_dev *dev)
2859 {
2860 /* enable ep0 interrupts */
2861 pch_udc_enable_ep_interrupts(dev, UDC_EPINT_IN_EP0 |
2862 UDC_EPINT_OUT_EP0);
2863 /* enable device interrupts */
2864 pch_udc_enable_interrupts(dev, UDC_DEVINT_UR | UDC_DEVINT_US |
2865 UDC_DEVINT_ES | UDC_DEVINT_ENUM |
2866 UDC_DEVINT_SI | UDC_DEVINT_SC);
2867 }
2868
2869 /**
2870 * pch_udc_pcd_reinit() - This API initializes the endpoint structures
2871 * @dev: Reference to the driver structure
2872 */
2873 static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
2874 {
2875 const char *const ep_string[] = {
2876 ep0_string, "ep0out", "ep1in", "ep1out", "ep2in", "ep2out",
2877 "ep3in", "ep3out", "ep4in", "ep4out", "ep5in", "ep5out",
2878 "ep6in", "ep6out", "ep7in", "ep7out", "ep8in", "ep8out",
2879 "ep9in", "ep9out", "ep10in", "ep10out", "ep11in", "ep11out",
2880 "ep12in", "ep12out", "ep13in", "ep13out", "ep14in", "ep14out",
2881 "ep15in", "ep15out",
2882 };
2883 int i;
2884
2885 dev->gadget.speed = USB_SPEED_UNKNOWN;
2886 INIT_LIST_HEAD(&dev->gadget.ep_list);
2887
2888 /* Initialize the endpoints structures */
2889 memset(dev->ep, 0, sizeof dev->ep);
2890 for (i = 0; i < PCH_UDC_EP_NUM; i++) {
2891 struct pch_udc_ep *ep = &dev->ep[i];
2892 ep->dev = dev;
2893 ep->halted = 1;
2894 ep->num = i / 2;
2895 ep->in = ~i & 1;
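		/* even indices are IN endpoints, odd are OUT: ep0in, ep0out, ep1in, ... */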
2896 ep->ep.name = ep_string[i];
2897 ep->ep.ops = &pch_udc_ep_ops;
2898 if (ep->in) {
2899 ep->offset_addr = ep->num * UDC_EP_REG_SHIFT;
2900 ep->ep.caps.dir_in = true;
2901 } else {
2902 ep->offset_addr = (UDC_EPINT_OUT_SHIFT + ep->num) *
2903 UDC_EP_REG_SHIFT;
2904 ep->ep.caps.dir_out = true;
2905 }
2906 if (i == UDC_EP0IN_IDX || i == UDC_EP0OUT_IDX) {
2907 ep->ep.caps.type_control = true;
2908 } else {
2909 ep->ep.caps.type_iso = true;
2910 ep->ep.caps.type_bulk = true;
2911 ep->ep.caps.type_int = true;
2912 }
2913 /* need to set ep->ep.maxpacket and set Default Configuration?*/
2914 usb_ep_set_maxpacket_limit(&ep->ep, UDC_BULK_MAX_PKT_SIZE);
2915 list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
2916 INIT_LIST_HEAD(&ep->queue);
2917 }
2918 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IDX].ep, UDC_EP0IN_MAX_PKT_SIZE);
2919 usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IDX].ep, UDC_EP0OUT_MAX_PKT_SIZE);
2920
2921 /* remove ep0 in and out from the list; they have their own pointers */
2922 list_del_init(&dev->ep[UDC_EP0IN_IDX].ep.ep_list);
2923 list_del_init(&dev->ep[UDC_EP0OUT_IDX].ep.ep_list);
2924
2925 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
2926 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2927 }
2928
2929 /**
2930 * pch_udc_pcd_init() - This API initializes the driver structure
2931 * @dev: Reference to the driver structure
2932 *
2933 * Return codes:
2934 * 0: Success
2935 * -%ERRNO: All kinds of errors when retrieving the VBUS GPIO
2936 */
2937 static int pch_udc_pcd_init(struct pch_udc_dev *dev)
2938 {
2939 int ret;
2940
2941 pch_udc_init(dev);
2942 pch_udc_pcd_reinit(dev);
2943
2944 ret = pch_vbus_gpio_init(dev);
2945 if (ret)
2946 pch_udc_exit(dev);
2947 return ret;
2948 }
2949
2950 /**
2951 * init_dma_pools() - create dma pools during initialization
2952 * @dev: Reference to the driver structure
2953 */
2954 static int init_dma_pools(struct pch_udc_dev *dev)
2955 {
2956 struct pch_udc_stp_dma_desc *td_stp;
2957 struct pch_udc_data_dma_desc *td_data;
2958 void *ep0out_buf;
2959
2960 /* DMA setup */
2961 dev->data_requests = dma_pool_create("data_requests", &dev->pdev->dev,
2962 sizeof(struct pch_udc_data_dma_desc), 0, 0);
2963 if (!dev->data_requests) {
2964 dev_err(&dev->pdev->dev, "%s: can't get request data pool\n",
2965 __func__);
2966 return -ENOMEM;
2967 }
2968
2969 /* dma desc for setup data */
2970 dev->stp_requests = dma_pool_create("setup requests", &dev->pdev->dev,
2971 sizeof(struct pch_udc_stp_dma_desc), 0, 0);
2972 if (!dev->stp_requests) {
2973 dev_err(&dev->pdev->dev, "%s: can't get setup request pool\n",
2974 __func__);
2975 return -ENOMEM;
2976 }
2977 /* setup */
2978 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
2979 &dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
2980 if (!td_stp) {
2981 dev_err(&dev->pdev->dev,
2982 "%s: can't allocate setup dma descriptor\n", __func__);
2983 return -ENOMEM;
2984 }
2985 dev->ep[UDC_EP0OUT_IDX].td_stp = td_stp;
2986
2987 /* data: 0 packets !? */
2988 td_data = dma_pool_alloc(dev->data_requests, GFP_KERNEL,
2989 &dev->ep[UDC_EP0OUT_IDX].td_data_phys);
2990 if (!td_data) {
2991 dev_err(&dev->pdev->dev,
2992 "%s: can't allocate data dma descriptor\n", __func__);
2993 return -ENOMEM;
2994 }
2995 dev->ep[UDC_EP0OUT_IDX].td_data = td_data;
2996 dev->ep[UDC_EP0IN_IDX].td_stp = NULL;
2997 dev->ep[UDC_EP0IN_IDX].td_stp_phys = 0;
2998 dev->ep[UDC_EP0IN_IDX].td_data = NULL;
2999 dev->ep[UDC_EP0IN_IDX].td_data_phys = 0;
3000
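	/* bounce buffer backing the EP0 OUT data descriptor, see pch_udc_activate_control_ep() */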
3001 ep0out_buf = devm_kzalloc(&dev->pdev->dev, UDC_EP0OUT_BUFF_SIZE * 4,
3002 GFP_KERNEL);
3003 if (!ep0out_buf)
3004 return -ENOMEM;
3005 dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
3006 UDC_EP0OUT_BUFF_SIZE * 4,
3007 DMA_FROM_DEVICE);
3008 return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
3009 }
3010
3011 static int pch_udc_start(struct usb_gadget *g,
3012 struct usb_gadget_driver *driver)
3013 {
3014 struct pch_udc_dev *dev = to_pch_udc(g);
3015
3016 driver->driver.bus = NULL;
3017 dev->driver = driver;
3018
3019 /* get ready for ep0 traffic */
3020 pch_udc_setup_ep0(dev);
3021
3022 /* clear SD */
3023 if ((pch_vbus_gpio_get_value(dev) != 0) || !dev->vbus_gpio.intr)
3024 pch_udc_clear_disconnect(dev);
3025
3026 dev->connected = 1;
3027 return 0;
3028 }
3029
3030 static int pch_udc_stop(struct usb_gadget *g)
3031 {
3032 struct pch_udc_dev *dev = to_pch_udc(g);
3033
3034 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3035
3036 /* Ensure that there are no pending requests with this driver */
3037 dev->driver = NULL;
3038 dev->connected = 0;
3039
3040 /* set SD */
3041 pch_udc_set_disconnect(dev);
3042
3043 return 0;
3044 }
3045
3046 static void pch_udc_shutdown(struct pci_dev *pdev)
3047 {
3048 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3049
3050 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3051 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3052
3053 /* disable the pullup so the host will think we're gone */
3054 pch_udc_set_disconnect(dev);
3055 }
3056
3057 static void pch_udc_remove(struct pci_dev *pdev)
3058 {
3059 struct pch_udc_dev *dev = pci_get_drvdata(pdev);
3060
3061 usb_del_gadget_udc(&dev->gadget);
3062
3063 /* gadget driver must not be registered */
3064 if (dev->driver)
3065 dev_err(&pdev->dev,
3066 "%s: gadget driver still bound!!!\n", __func__);
3067 /* dma pool cleanup */
3068 dma_pool_destroy(dev->data_requests);
3069
3070 if (dev->stp_requests) {
3071 /* cleanup DMA desc's for ep0in */
3072 if (dev->ep[UDC_EP0OUT_IDX].td_stp) {
3073 dma_pool_free(dev->stp_requests,
3074 dev->ep[UDC_EP0OUT_IDX].td_stp,
3075 dev->ep[UDC_EP0OUT_IDX].td_stp_phys);
3076 }
3077 if (dev->ep[UDC_EP0OUT_IDX].td_data) {
3078 dma_pool_free(dev->stp_requests,
3079 dev->ep[UDC_EP0OUT_IDX].td_data,
3080 dev->ep[UDC_EP0OUT_IDX].td_data_phys);
3081 }
3082 dma_pool_destroy(dev->stp_requests);
3083 }
3084
3085 if (dev->dma_addr)
3086 dma_unmap_single(&dev->pdev->dev, dev->dma_addr,
3087 UDC_EP0OUT_BUFF_SIZE * 4, DMA_FROM_DEVICE);
3088
3089 pch_vbus_gpio_free(dev);
3090
3091 pch_udc_exit(dev);
3092 }
3093
3094 #ifdef CONFIG_PM_SLEEP
3095 static int pch_udc_suspend(struct device *d)
3096 {
3097 struct pch_udc_dev *dev = dev_get_drvdata(d);
3098
3099 pch_udc_disable_interrupts(dev, UDC_DEVINT_MSK);
3100 pch_udc_disable_ep_interrupts(dev, UDC_EPINT_MSK_DISABLE_ALL);
3101
3102 return 0;
3103 }
3104
3105 static int pch_udc_resume(struct device *d)
3106 {
3107 return 0;
3108 }
3109
3110 static SIMPLE_DEV_PM_OPS(pch_udc_pm, pch_udc_suspend, pch_udc_resume);
3111 #define PCH_UDC_PM_OPS (&pch_udc_pm)
3112 #else
3113 #define PCH_UDC_PM_OPS NULL
3114 #endif /* CONFIG_PM_SLEEP */
3115
3116 static int pch_udc_probe(struct pci_dev *pdev,
3117 const struct pci_device_id *id)
3118 {
3119 int bar;
3120 int retval;
3121 struct pch_udc_dev *dev;
3122
3123 /* init */
3124 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
3125 if (!dev)
3126 return -ENOMEM;
3127
3128 /* pci setup */
3129 retval = pcim_enable_device(pdev);
3130 if (retval)
3131 return retval;
3132
3133 dev->pdev = pdev;
3134 pci_set_drvdata(pdev, dev);
3135
3136 /* Determine BAR based on PCI ID */
3137 if (id->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC)
3138 bar = PCH_UDC_PCI_BAR_QUARK_X1000;
3139 else
3140 bar = PCH_UDC_PCI_BAR;
3141
3142 /* PCI resource allocation */
3143 retval = pcim_iomap_regions(pdev, 1 << bar, pci_name(pdev));
3144 if (retval)
3145 return retval;
3146
3147 dev->base_addr = pcim_iomap_table(pdev)[bar];
3148
3149 /* initialize the hardware */
3150 retval = pch_udc_pcd_init(dev);
3151 if (retval)
3152 return retval;
3153
3154 pci_enable_msi(pdev);
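	/* MSI is optional; if it cannot be enabled, the legacy shared IRQ is used instead */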
3155
3156 retval = devm_request_irq(&pdev->dev, pdev->irq, pch_udc_isr,
3157 IRQF_SHARED, KBUILD_MODNAME, dev);
3158 if (retval) {
3159 dev_err(&pdev->dev, "%s: request_irq(%d) fail\n", __func__,
3160 pdev->irq);
3161 goto finished;
3162 }
3163
3164 pci_set_master(pdev);
3165 pci_try_set_mwi(pdev);
3166
3167 /* device struct setup */
3168 spin_lock_init(&dev->lock);
3169 dev->gadget.ops = &pch_udc_ops;
3170
3171 retval = init_dma_pools(dev);
3172 if (retval)
3173 goto finished;
3174
3175 dev->gadget.name = KBUILD_MODNAME;
3176 dev->gadget.max_speed = USB_SPEED_HIGH;
3177
3178 /* Put the device in disconnected state until a driver is bound */
3179 pch_udc_set_disconnect(dev);
3180 retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
3181 if (retval)
3182 goto finished;
3183 return 0;
3184
3185 finished:
3186 pch_udc_remove(pdev);
3187 return retval;
3188 }
3189
3190 static const struct pci_device_id pch_udc_pcidev_id[] = {
3191 {
3192 PCI_DEVICE(PCI_VENDOR_ID_INTEL,
3193 PCI_DEVICE_ID_INTEL_QUARK_X1000_UDC),
3194 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3195 .class_mask = 0xffffffff,
3196 },
3197 {
3198 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EG20T_UDC),
3199 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3200 .class_mask = 0xffffffff,
3201 },
3202 {
3203 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7213_IOH_UDC),
3204 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3205 .class_mask = 0xffffffff,
3206 },
3207 {
3208 PCI_DEVICE(PCI_VENDOR_ID_ROHM, PCI_DEVICE_ID_ML7831_IOH_UDC),
3209 .class = PCI_CLASS_SERIAL_USB_DEVICE,
3210 .class_mask = 0xffffffff,
3211 },
3212 { 0 },
3213 };
3214
3215 MODULE_DEVICE_TABLE(pci, pch_udc_pcidev_id);
3216
3217 static struct pci_driver pch_udc_driver = {
3218 .name = KBUILD_MODNAME,
3219 .id_table = pch_udc_pcidev_id,
3220 .probe = pch_udc_probe,
3221 .remove = pch_udc_remove,
3222 .shutdown = pch_udc_shutdown,
3223 .driver = {
3224 .pm = PCH_UDC_PM_OPS,
3225 },
3226 };
3227
3228 module_pci_driver(pch_udc_driver);
3229
3230 MODULE_DESCRIPTION("Intel EG20T USB Device Controller");
3231 MODULE_AUTHOR("LAPIS Semiconductor, <tomoya-linux@dsn.lapis-semi.com>");
3232 MODULE_LICENSE("GPL");
3233