/* SPDX-License-Identifier: GPL-2.0 */
/*
 * MUSB OTG driver defines
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 */

#ifndef __MUSB_CORE_H__
#define __MUSB_CORE_H__

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <linux/usb/musb.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>

struct musb;
struct musb_hw_ep;
struct musb_ep;
struct musb_qh;

/* Helper defines for struct musb->hwvers */
#define MUSB_HWVERS_MAJOR(x)	((x >> 10) & 0x1f)
#define MUSB_HWVERS_MINOR(x)	(x & 0x3ff)
#define MUSB_HWVERS_RC		0x8000
#define MUSB_HWVERS_1300	0x52C
#define MUSB_HWVERS_1400	0x590
#define MUSB_HWVERS_1800	0x720
#define MUSB_HWVERS_1900	0x784
#define MUSB_HWVERS_2000	0x800
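
/*
 * Illustrative decode (not from the original header): a core reporting
 * hwvers == MUSB_HWVERS_2000 (0x800) yields MUSB_HWVERS_MAJOR() == 2 and
 * MUSB_HWVERS_MINOR() == 0, i.e. version 2.0, while MUSB_HWVERS_1300
 * (0x52C) decodes as 1.300. The MUSB_HWVERS_RC bit, when set, presumably
 * marks a release-candidate revision of the IP.
 */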

#include "musb_debug.h"
#include "musb_dma.h"

#include "musb_io.h"

#include "musb_gadget.h"
#include <linux/usb/hcd.h>
#include "musb_host.h"

/* NOTE: otg and peripheral-only state machines start at B_IDLE.
 * OTG or host-only go to A_IDLE when ID is sensed.
 */
#define is_peripheral_active(m)		(!(m)->is_host)
#define is_host_active(m)		((m)->is_host)

/****************************** CONSTANTS ********************************/

#ifndef MUSB_C_NUM_EPS
#define MUSB_C_NUM_EPS ((u8)16)
#endif

#ifndef MUSB_MAX_END0_PACKET
#define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
#endif

/* host side ep0 states */
enum musb_h_ep0_state {
	MUSB_EP0_IDLE,
	MUSB_EP0_START,			/* expect ack of setup */
	MUSB_EP0_IN,			/* expect IN DATA */
	MUSB_EP0_OUT,			/* expect ack of OUT DATA */
	MUSB_EP0_STATUS,		/* expect ack of STATUS */
} __attribute__ ((packed));

/* peripheral side ep0 states */
enum musb_g_ep0_state {
	MUSB_EP0_STAGE_IDLE,		/* idle, waiting for SETUP */
	MUSB_EP0_STAGE_SETUP,		/* received SETUP */
	MUSB_EP0_STAGE_TX,		/* IN data */
	MUSB_EP0_STAGE_RX,		/* OUT data */
	MUSB_EP0_STAGE_STATUSIN,	/* (after OUT data) */
	MUSB_EP0_STAGE_STATUSOUT,	/* (after IN data) */
	MUSB_EP0_STAGE_ACKWAIT,		/* after zlp, before statusin */
} __attribute__ ((packed));

/*
 * OTG protocol constants. See USB OTG 1.3 spec,
 * sections 5.5 "Device Timings" and 6.6.5 "Timers".
 */
#define OTG_TIME_A_WAIT_VRISE	100		/* msec (max) */
#define OTG_TIME_A_WAIT_BCON	1100		/* min 1 second */
#define OTG_TIME_A_AIDL_BDIS	200		/* min 200 msec */
#define OTG_TIME_B_ASE0_BRST	100		/* min 3.125 ms */

/****************************** FUNCTIONS ********************************/

#define MUSB_HST_MODE(_musb)\
	{ (_musb)->is_host = true; }
#define MUSB_DEV_MODE(_musb) \
	{ (_musb)->is_host = false; }

#define test_devctl_hst_mode(_x) \
	(musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)

#define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
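
/*
 * Example usage (illustrative only, not part of the original header):
 *
 *	dev_dbg(musb->controller, "operating in %s mode\n", MUSB_MODE(musb));
 */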

/******************************** TYPES *************************************/

struct musb_io;

/**
 * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
 * @quirks: flags for platform specific quirks
 * @enable: enable device
 * @disable: disable device
 * @ep_offset: returns the end point offset
 * @ep_select: selects the specified end point
 * @fifo_mode: sets the fifo mode
 * @fifo_offset: returns the fifo offset
 * @busctl_offset: returns the busctl register offset for an end point
 * @readb: read 8 bits
 * @writeb: write 8 bits
 * @clearb: could be clear-on-readb or W1C
 * @readw: read 16 bits
 * @writew: write 16 bits
 * @clearw: could be clear-on-readw or W1C
 * @read_fifo: reads the fifo
 * @write_fifo: writes to fifo
 * @get_toggle: platform specific get toggle function
 * @set_toggle: platform specific set toggle function
 * @dma_init: platform specific dma init function
 * @dma_exit: platform specific dma exit function
 * @init: turns on clocks, sets up platform-specific registers, etc
 * @exit: undoes @init
 * @set_mode: forcefully changes operating mode
 * @try_idle: tries to idle the IP
 * @recover: platform-specific babble recovery
 * @vbus_status: returns vbus status if possible
 * @set_vbus: forces vbus status
 * @pre_root_reset_end: called before the root usb port reset flag gets cleared
 * @post_root_reset_end: called after the root usb port reset flag gets cleared
 * @phy_callback: optional callback function for the phy to call
 * @clear_ep_rxintr: platform specific clear of a pending endpoint RX interrupt
 */
struct musb_platform_ops {

#define MUSB_G_NO_SKB_RESERVE	BIT(9)
#define MUSB_DA8XX		BIT(8)
#define MUSB_PRESERVE_SESSION	BIT(7)
#define MUSB_DMA_UX500		BIT(6)
#define MUSB_DMA_CPPI41		BIT(5)
#define MUSB_DMA_CPPI		BIT(4)
#define MUSB_DMA_TUSB_OMAP	BIT(3)
#define MUSB_DMA_INVENTRA	BIT(2)
#define MUSB_IN_TUSB		BIT(1)
#define MUSB_INDEXED_EP		BIT(0)
	u32	quirks;

	int	(*init)(struct musb *musb);
	int	(*exit)(struct musb *musb);

	void	(*enable)(struct musb *musb);
	void	(*disable)(struct musb *musb);

	u32	(*ep_offset)(u8 epnum, u16 offset);
	void	(*ep_select)(void __iomem *mbase, u8 epnum);
	u16	fifo_mode;
	u32	(*fifo_offset)(u8 epnum);
	u32	(*busctl_offset)(u8 epnum, u16 offset);
	u8	(*readb)(void __iomem *addr, u32 offset);
	void	(*writeb)(void __iomem *addr, u32 offset, u8 data);
	u8	(*clearb)(void __iomem *addr, u32 offset);
	u16	(*readw)(void __iomem *addr, u32 offset);
	void	(*writew)(void __iomem *addr, u32 offset, u16 data);
	u16	(*clearw)(void __iomem *addr, u32 offset);
	void	(*read_fifo)(struct musb_hw_ep *hw_ep, u16 len, u8 *buf);
	void	(*write_fifo)(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf);
	u16	(*get_toggle)(struct musb_qh *qh, int is_out);
	u16	(*set_toggle)(struct musb_qh *qh, int is_out, struct urb *urb);
	struct dma_controller *
		(*dma_init) (struct musb *musb, void __iomem *base);
	void	(*dma_exit)(struct dma_controller *c);
	int	(*set_mode)(struct musb *musb, u8 mode);
	void	(*try_idle)(struct musb *musb, unsigned long timeout);
	int	(*recover)(struct musb *musb);

	int	(*vbus_status)(struct musb *musb);
	void	(*set_vbus)(struct musb *musb, int on);

	void	(*pre_root_reset_end)(struct musb *musb);
	void	(*post_root_reset_end)(struct musb *musb);
	int	(*phy_callback)(enum musb_vbus_id_status status);
	void	(*clear_ep_rxintr)(struct musb *musb, int epnum);
};
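
/*
 * Minimal glue-layer sketch (illustrative only; the "xyz_" names are
 * hypothetical, not taken from any real glue driver). A glue layer
 * typically fills in a static ops table and hands it to musb_core via its
 * platform data when it registers the "musb-hdrc" child device:
 *
 *	static int xyz_musb_init(struct musb *musb)
 *	{
 *		// claim clocks/PHY here, then let musb_core take over
 *		return 0;
 *	}
 *
 *	static int xyz_musb_exit(struct musb *musb)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct musb_platform_ops xyz_musb_ops = {
 *		.quirks	= MUSB_INDEXED_EP | MUSB_DMA_INVENTRA,
 *		.init	= xyz_musb_init,
 *		.exit	= xyz_musb_exit,
 *	};
 */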

/*
 * struct musb_hw_ep - endpoint hardware (bidirectional)
 *
 * Ordered slightly for better cacheline locality.
 */
struct musb_hw_ep {
	struct musb		*musb;
	void __iomem		*fifo;
	void __iomem		*regs;

#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
	void __iomem		*conf;
#endif

	/* index in musb->endpoints[] */
	u8			epnum;

	/* hardware configuration, possibly dynamic */
	bool			is_shared_fifo;
	bool			tx_double_buffered;
	bool			rx_double_buffered;
	u16			max_packet_sz_tx;
	u16			max_packet_sz_rx;

	struct dma_channel	*tx_channel;
	struct dma_channel	*rx_channel;

#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
	/* TUSB has "asynchronous" and "synchronous" dma modes */
	dma_addr_t		fifo_async;
	dma_addr_t		fifo_sync;
	void __iomem		*fifo_sync_va;
#endif

	/* currently scheduled peripheral endpoint */
	struct musb_qh		*in_qh;
	struct musb_qh		*out_qh;

	u8			rx_reinit;
	u8			tx_reinit;

	/* peripheral side */
	struct musb_ep		ep_in;		/* TX */
	struct musb_ep		ep_out;		/* RX */
};

static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep)
{
	return next_request(&hw_ep->ep_in);
}

static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep)
{
	return next_request(&hw_ep->ep_out);
}

struct musb_csr_regs {
	/* FIFO registers */
	u16 txmaxp, txcsr, rxmaxp, rxcsr;
	u16 rxfifoadd, txfifoadd;
	u8 txtype, txinterval, rxtype, rxinterval;
	u8 rxfifosz, txfifosz;
	u8 txfunaddr, txhubaddr, txhubport;
	u8 rxfunaddr, rxhubaddr, rxhubport;
};

struct musb_context_registers {

	u8 power;
	u8 intrusbe;
	u16 frame;
	u8 index, testmode;

	u8 devctl, busctl, misc;
	u32 otg_interfsel;

	struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
};

/*
 * struct musb - Driver instance data.
 */
struct musb {
	/* device lock */
	spinlock_t		lock;
	spinlock_t		list_lock;	/* resume work list lock */

	struct musb_io		io;
	const struct musb_platform_ops *ops;
	struct musb_context_registers context;

	irqreturn_t		(*isr)(int, void *);
	struct delayed_work	irq_work;
	struct delayed_work	deassert_reset_work;
	struct delayed_work	finish_resume_work;
	struct delayed_work	gadget_work;
	u16			hwvers;

	u16			intrrxe;
	u16			intrtxe;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
#define MUSB_PORT_STAT_RESUME	(1 << 31)

	u32			port1_status;

	unsigned long		rh_timer;

	enum musb_h_ep0_state	ep0_stage;

	/* bulk traffic normally dedicates endpoint hardware, and each
	 * direction has its own ring of host side endpoints.
	 * we try to progress the transfer at the head of each endpoint's
	 * queue until it completes or NAKs too much; then we try the next
	 * endpoint.
	 */
	struct musb_hw_ep	*bulk_ep;

	struct list_head	control;	/* of musb_qh */
	struct list_head	in_bulk;	/* of musb_qh */
	struct list_head	out_bulk;	/* of musb_qh */
	struct list_head	pending_list;	/* pending work list */

	struct timer_list	otg_timer;
	struct timer_list	dev_timer;
	struct notifier_block	nb;

	struct dma_controller	*dma_controller;

	struct device		*controller;
	void __iomem		*ctrl_base;
	void __iomem		*mregs;

#if IS_ENABLED(CONFIG_USB_MUSB_TUSB6010)
	dma_addr_t		async;
	dma_addr_t		sync;
	void __iomem		*sync_va;
	u8			tusb_revision;
#endif

	/* passed down from chip/board specific irq handlers */
	u8			int_usb;
	u16			int_rx;
	u16			int_tx;

	struct usb_phy		*xceiv;
	struct phy		*phy;

	int			nIrq;
	unsigned		irq_wake:1;

	struct musb_hw_ep	endpoints[MUSB_C_NUM_EPS];
#define control_ep		endpoints

#define VBUSERR_RETRY_COUNT	3
	u16			vbuserr_retry;
	u16			epmask;
	u8			nr_endpoints;

	int			(*board_set_power)(int state);

	u8			min_power;	/* vbus for periph, in mA/2 */

	enum musb_mode		port_mode;
	bool			session;
	unsigned long		quirk_retries;
	bool			is_host;

	int			a_wait_bcon;	/* VBUS timeout in msecs */
	unsigned long		idle_timeout;	/* Next timeout in jiffies */

	unsigned		is_initialized:1;
	unsigned		is_runtime_suspended:1;

	/* active means connected and not suspended */
	unsigned		is_active:1;

	unsigned		is_multipoint:1;

	unsigned		hb_iso_rx:1;	/* high bandwidth iso rx? */
	unsigned		hb_iso_tx:1;	/* high bandwidth iso tx? */
	unsigned		dyn_fifo:1;	/* dynamic FIFO supported? */

	unsigned		bulk_split:1;
#define	can_bulk_split(musb, type) \
	(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)

	unsigned		bulk_combine:1;
#define	can_bulk_combine(musb, type) \
	(((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)

	/* is_suspended means USB B_PERIPHERAL suspend */
	unsigned		is_suspended:1;

	/* may_wakeup means remote wakeup is enabled */
	unsigned		may_wakeup:1;

	/* is_self_powered is reported in device status and the
	 * config descriptor.  is_bus_powered means B_PERIPHERAL
	 * draws some VBUS current; both can be true.
	 */
	unsigned		is_self_powered:1;
	unsigned		is_bus_powered:1;

	unsigned		set_address:1;
	unsigned		test_mode:1;
	unsigned		softconnect:1;

	unsigned		flush_irq_work:1;

	u8			address;
	u8			test_mode_nr;
	u16			ackpend;		/* ep0 */
	enum musb_g_ep0_state	ep0_state;
	struct usb_gadget	g;			/* the gadget */
	struct usb_gadget_driver *gadget_driver;	/* its driver */
	struct usb_hcd		*hcd;			/* the usb hcd */

	const struct musb_hdrc_config *config;

	int			xceiv_old_state;
#ifdef CONFIG_DEBUG_FS
	struct dentry		*debugfs_root;
#endif
};

/* This must be included after struct musb is defined */
#include "musb_regs.h"

static inline struct musb *gadget_to_musb(struct usb_gadget *g)
{
	return container_of(g, struct musb, g);
}

static inline char *musb_ep_xfertype_string(u8 type)
{
	char *s;

	switch (type) {
	case USB_ENDPOINT_XFER_CONTROL:
		s = "ctrl";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		s = "iso";
		break;
	case USB_ENDPOINT_XFER_BULK:
		s = "bulk";
		break;
	case USB_ENDPOINT_XFER_INT:
		s = "int";
		break;
	default:
		s = "";
		break;
	}
	return s;
}

static inline int musb_read_fifosize(struct musb *musb,
		struct musb_hw_ep *hw_ep, u8 epnum)
{
	void __iomem *mbase = musb->mregs;
	u8 reg = 0;

	/* read from core using indexed model */
	reg = musb_readb(mbase, musb->io.ep_offset(epnum, MUSB_FIFOSIZE));
	/* 0's returned when no more endpoints */
	if (!reg)
		return -ENODEV;

	musb->nr_endpoints++;
	musb->epmask |= (1 << epnum);

	hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);

	/* shared TX/RX FIFO? */
	if ((reg & 0xf0) == 0xf0) {
		hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
		hw_ep->is_shared_fifo = true;
		return 0;
	} else {
		hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
		hw_ep->is_shared_fifo = false;
	}

	return 0;
}
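
/*
 * Worked example (illustrative): a FIFOSIZE register value of 0x96 decodes
 * as a 1 << 6 == 64 byte TX FIFO and a 1 << 9 == 512 byte RX FIFO, while a
 * value of 0xf6 (RX nibble all ones) means a single 64-byte FIFO shared
 * between TX and RX for that endpoint.
 */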

static inline void musb_configure_ep0(struct musb *musb)
{
	musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
	musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
	musb->endpoints[0].is_shared_fifo = true;
}

/***************************** Glue it together *****************************/

extern const char musb_driver_name[];

extern void musb_stop(struct musb *musb);
extern void musb_start(struct musb *musb);

extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);

extern int musb_set_host(struct musb *musb);
extern int musb_set_peripheral(struct musb *musb);

extern void musb_load_testpacket(struct musb *);

extern irqreturn_t musb_interrupt(struct musb *);

extern void musb_hnp_stop(struct musb *musb);

int musb_queue_resume_work(struct musb *musb,
			   int (*callback)(struct musb *musb, void *data),
			   void *data);
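
/*
 * Illustrative usage sketch (the callback name below is hypothetical):
 * register accesses can be queued so they run only after the controller
 * has finished runtime resume, e.g.:
 *
 *	static int xyz_restart_session(struct musb *musb, void *unused)
 *	{
 *		musb_writeb(musb->mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
 *		return 0;
 *	}
 *
 *	musb_queue_resume_work(musb, xyz_restart_session, NULL);
 */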

static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
{
	if (musb->ops->set_vbus)
		musb->ops->set_vbus(musb, is_on);
}

static inline void musb_platform_enable(struct musb *musb)
{
	if (musb->ops->enable)
		musb->ops->enable(musb);
}

static inline void musb_platform_disable(struct musb *musb)
{
	if (musb->ops->disable)
		musb->ops->disable(musb);
}

static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
{
	if (!musb->ops->set_mode)
		return 0;

	return musb->ops->set_mode(musb, mode);
}

static inline void musb_platform_try_idle(struct musb *musb,
		unsigned long timeout)
{
	if (musb->ops->try_idle)
		musb->ops->try_idle(musb, timeout);
}

static inline int musb_platform_recover(struct musb *musb)
{
	if (!musb->ops->recover)
		return 0;

	return musb->ops->recover(musb);
}

static inline int musb_platform_get_vbus_status(struct musb *musb)
{
	if (!musb->ops->vbus_status)
		return -EINVAL;

	return musb->ops->vbus_status(musb);
}

static inline int musb_platform_init(struct musb *musb)
{
	if (!musb->ops->init)
		return -EINVAL;

	return musb->ops->init(musb);
}

static inline int musb_platform_exit(struct musb *musb)
{
	if (!musb->ops->exit)
		return -EINVAL;

	return musb->ops->exit(musb);
}

static inline void musb_platform_pre_root_reset_end(struct musb *musb)
{
	if (musb->ops->pre_root_reset_end)
		musb->ops->pre_root_reset_end(musb);
}

static inline void musb_platform_post_root_reset_end(struct musb *musb)
{
	if (musb->ops->post_root_reset_end)
		musb->ops->post_root_reset_end(musb);
}

static inline void musb_platform_clear_ep_rxintr(struct musb *musb, int epnum)
{
	if (musb->ops->clear_ep_rxintr)
		musb->ops->clear_ep_rxintr(musb, epnum);
}

/*
 * Gets the "dr_mode" property from DT and converts it into musb_mode;
 * if the property is not found or not recognized, returns MUSB_OTG.
 */
extern enum musb_mode musb_get_mode(struct device *dev);

#endif	/* __MUSB_CORE_H__ */