1 /**
2 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
3 *
4 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Authors: Felipe Balbi <balbi@ti.com>,
7 * Sebastian Andrzej Siewior <bigeasy@linutronix.de>
8 *
9 * This program is free software: you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 of
11 * the License as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/spinlock.h>
23 #include <linux/platform_device.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/interrupt.h>
26 #include <linux/io.h>
27 #include <linux/list.h>
28 #include <linux/dma-mapping.h>
29
30 #include <linux/usb/ch9.h>
31 #include <linux/usb/gadget.h>
32
33 #include "debug.h"
34 #include "core.h"
35 #include "gadget.h"
36 #include "io.h"
37
38 /**
39 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
40 * @dwc: pointer to our context structure
41  * @mode: the mode to set (J, K, SE0 NAK, Force Enable)
42 *
43 * Caller should take care of locking. This function will
44 * return 0 on success or -EINVAL if wrong Test Selector
45 * is passed
46 */
47 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
48 {
49 u32 reg;
50
51 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
52 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
53
54 switch (mode) {
55 case TEST_J:
56 case TEST_K:
57 case TEST_SE0_NAK:
58 case TEST_PACKET:
59 case TEST_FORCE_EN:
60 reg |= mode << 1;
61 break;
62 default:
63 return -EINVAL;
64 }
65
66 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
67
68 return 0;
69 }
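/*
 * Usage sketch (not part of the driver): a caller that already holds
 * dwc->lock, such as the ep0 SET_FEATURE(TEST_MODE) handler, would be
 * expected to use this roughly as follows; the error handling shown is
 * illustrative only.
 *
 *	ret = dwc3_gadget_set_test_mode(dwc, TEST_PACKET);
 *	if (ret < 0)
 *		dev_dbg(dwc->dev, "invalid test selector\n");
 */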
70
71 /**
72 * dwc3_gadget_get_link_state - Gets current state of USB Link
73 * @dwc: pointer to our context structure
74 *
75 * Caller should take care of locking. This function will
76 * return the link state on success (>= 0) or -ETIMEDOUT.
77 */
78 int dwc3_gadget_get_link_state(struct dwc3 *dwc)
79 {
80 u32 reg;
81
82 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
83
84 return DWC3_DSTS_USBLNKST(reg);
85 }
86
87 /**
88 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
89 * @dwc: pointer to our context structure
90 * @state: the state to put link into
91 *
92 * Caller should take care of locking. This function will
93 * return 0 on success or -ETIMEDOUT.
94 */
95 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
96 {
97 int retries = 10000;
98 u32 reg;
99
100 /*
101 * Wait until device controller is ready. Only applies to 1.94a and
102 * later RTL.
103 */
104 if (dwc->revision >= DWC3_REVISION_194A) {
105 while (--retries) {
106 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
107 if (reg & DWC3_DSTS_DCNRD)
108 udelay(5);
109 else
110 break;
111 }
112
113 if (retries <= 0)
114 return -ETIMEDOUT;
115 }
116
117 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
118 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
119
120 /* set requested state */
121 reg |= DWC3_DCTL_ULSTCHNGREQ(state);
122 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
123
124 /*
125 * The following code is racy when called from dwc3_gadget_wakeup,
126 * and is not needed, at least on newer versions
127 */
128 if (dwc->revision >= DWC3_REVISION_194A)
129 return 0;
130
131 /* wait for a change in DSTS */
132 retries = 10000;
133 while (--retries) {
134 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
135
136 if (DWC3_DSTS_USBLNKST(reg) == state)
137 return 0;
138
139 udelay(5);
140 }
141
142 dwc3_trace(trace_dwc3_gadget,
143 "link state change request timed out");
144
145 return -ETIMEDOUT;
146 }
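/*
 * Usage sketch (illustrative; locking is assumed to be handled by the
 * caller): this is how dwc3_gadget_wakeup() below drives a remote-wakeup
 * attempt by requesting the Recovery link state.
 *
 *	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
 *	if (ret < 0)
 *		dev_err(dwc->dev, "failed to put link in Recovery\n");
 */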
147
148 /**
149 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
150 * @dwc: pointer to our context structure
151 *
152  * This function will do a best-effort FIFO allocation in order
153 * to improve FIFO usage and throughput, while still allowing
154 * us to enable as many endpoints as possible.
155 *
156 * Keep in mind that this operation will be highly dependent
157 * on the configured size for RAM1 - which contains TxFifo -,
158 * the amount of endpoints enabled on coreConsultant tool, and
159 * the width of the Master Bus.
160 *
161 * In the ideal world, we would always be able to satisfy the
162 * following equation:
163 *
164 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
165 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
166 *
167 * Unfortunately, due to many variables that's not always the case.
168 */
169 int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
170 {
171 int last_fifo_depth = 0;
172 int ram1_depth;
173 int fifo_size;
174 int mdwidth;
175 int num;
176
177 if (!dwc->needs_fifo_resize)
178 return 0;
179
180 ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
181 mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
182
183 /* MDWIDTH is represented in bits, we need it in bytes */
184 mdwidth >>= 3;
185
186 /*
187 * FIXME For now we will only allocate 1 wMaxPacketSize space
188 * for each enabled endpoint, later patches will come to
189 * improve this algorithm so that we better use the internal
190 * FIFO space
191 */
192 for (num = 0; num < dwc->num_in_eps; num++) {
193 /* bit0 indicates direction; 1 means IN ep */
194 struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
195 int mult = 1;
196 int tmp;
197
198 if (!(dep->flags & DWC3_EP_ENABLED))
199 continue;
200
201 if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
202 || usb_endpoint_xfer_isoc(dep->endpoint.desc))
203 mult = 3;
204
205 /*
206 * REVISIT: the following assumes we will always have enough
207 * space available on the FIFO RAM for all possible use cases.
208 * Make sure that's true somehow and change FIFO allocation
209 * accordingly.
210 *
211 * If we have Bulk or Isochronous endpoints, we want
212 * them to be able to be very, very fast. So we're giving
213 * those endpoints a fifo_size which is enough for 3 full
214 * packets
215 */
216 tmp = mult * (dep->endpoint.maxpacket + mdwidth);
217 tmp += mdwidth;
218
219 fifo_size = DIV_ROUND_UP(tmp, mdwidth);
220
221 fifo_size |= (last_fifo_depth << 16);
222
223 dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
224 dep->name, last_fifo_depth, fifo_size & 0xffff);
225
226 dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
227
228 last_fifo_depth += (fifo_size & 0xffff);
229 }
230
231 return 0;
232 }
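/*
 * Worked example of the sizing above (numbers are illustrative, not taken
 * from any particular SoC): with a 64-bit master bus (mdwidth = 8 bytes)
 * and a SuperSpeed bulk endpoint with maxpacket = 1024, mult = 3, so
 * tmp = 3 * (1024 + 8) + 8 = 3104 bytes and
 * fifo_size = DIV_ROUND_UP(3104, 8) = 388 mdwidth-wide locations, which is
 * what ends up in the size field of GTXFIFOSIZ for that endpoint.
 */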
233
234 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
235 int status)
236 {
237 struct dwc3 *dwc = dep->dwc;
238 unsigned int unmap_after_complete = false;
239 int i;
240
241 if (req->queued) {
242 i = 0;
243 do {
244 dep->busy_slot++;
245 /*
246 * Skip LINK TRB. We can't use req->trb and check for
247 * DWC3_TRBCTL_LINK_TRB because it points the TRB we
248 * just completed (not the LINK TRB).
249 */
250 if (((dep->busy_slot & DWC3_TRB_MASK) ==
251 DWC3_TRB_NUM - 1) &&
252 usb_endpoint_xfer_isoc(dep->endpoint.desc))
253 dep->busy_slot++;
254 } while (++i < req->request.num_mapped_sgs);
255 req->queued = false;
256 }
257 list_del(&req->list);
258 req->trb = NULL;
259
260 if (req->request.status == -EINPROGRESS)
261 req->request.status = status;
262
263 /*
264 * NOTICE we don't want to unmap before calling ->complete() if we're
265 * dealing with a bounced ep0 request. If we unmap it here, we would end
266 * up overwriting the contents of req->buf and this could confuse the
267 * gadget driver.
268 */
269 if (dwc->ep0_bounced && dep->number <= 1) {
270 dwc->ep0_bounced = false;
271 unmap_after_complete = true;
272 } else {
273 usb_gadget_unmap_request(&dwc->gadget,
274 &req->request, req->direction);
275 }
276
277 dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
278 req, dep->name, req->request.actual,
279 req->request.length, status);
280 trace_dwc3_gadget_giveback(req);
281
282 spin_unlock(&dwc->lock);
283 usb_gadget_giveback_request(&dep->endpoint, &req->request);
284 spin_lock(&dwc->lock);
285
286 if (unmap_after_complete)
287 usb_gadget_unmap_request(&dwc->gadget,
288 &req->request, req->direction);
289 }
290
291 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
292 {
293 u32 timeout = 500;
294 u32 reg;
295
296 trace_dwc3_gadget_generic_cmd(cmd, param);
297
298 dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
299 dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
300
301 do {
302 reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
303 if (!(reg & DWC3_DGCMD_CMDACT)) {
304 dwc3_trace(trace_dwc3_gadget,
305 "Command Complete --> %d",
306 DWC3_DGCMD_STATUS(reg));
307 if (DWC3_DGCMD_STATUS(reg))
308 return -EINVAL;
309 return 0;
310 }
311
312 /*
313 * We can't sleep here, because it's also called from
314 * interrupt context.
315 */
316 timeout--;
317 if (!timeout) {
318 dwc3_trace(trace_dwc3_gadget,
319 "Command Timed Out");
320 return -ETIMEDOUT;
321 }
322 udelay(1);
323 } while (1);
324 }
325
326 int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
327 unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
328 {
329 struct dwc3_ep *dep = dwc->eps[ep];
330 u32 timeout = 500;
331 u32 reg;
332
333 trace_dwc3_gadget_ep_cmd(dep, cmd, params);
334
335 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
336 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
337 dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
338
339 dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
340 do {
341 reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
342 if (!(reg & DWC3_DEPCMD_CMDACT)) {
343 dwc3_trace(trace_dwc3_gadget,
344 "Command Complete --> %d",
345 DWC3_DEPCMD_STATUS(reg));
346 if (DWC3_DEPCMD_STATUS(reg))
347 return -EINVAL;
348 return 0;
349 }
350
351 /*
352 * We can't sleep here, because it is also called from
353 * interrupt context.
354 */
355 timeout--;
356 if (!timeout) {
357 dwc3_trace(trace_dwc3_gadget,
358 "Command Timed Out");
359 return -ETIMEDOUT;
360 }
361
362 udelay(1);
363 } while (1);
364 }
365
366 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
367 struct dwc3_trb *trb)
368 {
369 u32 offset = (char *) trb - (char *) dep->trb_pool;
370
371 return dep->trb_pool_dma + offset;
372 }
373
374 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
375 {
376 struct dwc3 *dwc = dep->dwc;
377
378 if (dep->trb_pool)
379 return 0;
380
381 dep->trb_pool = dma_alloc_coherent(dwc->dev,
382 sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
383 &dep->trb_pool_dma, GFP_KERNEL);
384 if (!dep->trb_pool) {
385 dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
386 dep->name);
387 return -ENOMEM;
388 }
389
390 return 0;
391 }
392
393 static void dwc3_free_trb_pool(struct dwc3_ep *dep)
394 {
395 struct dwc3 *dwc = dep->dwc;
396
397 dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
398 dep->trb_pool, dep->trb_pool_dma);
399
400 dep->trb_pool = NULL;
401 dep->trb_pool_dma = 0;
402 }
403
404 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
405
406 /**
407 * dwc3_gadget_start_config - Configure EP resources
408 * @dwc: pointer to our controller context structure
409 * @dep: endpoint that is being enabled
410 *
411 * The assignment of transfer resources cannot perfectly follow the
412 * data book due to the fact that the controller driver does not have
413 * all knowledge of the configuration in advance. It is given this
414 * information piecemeal by the composite gadget framework after every
415 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
416 * programming model in this scenario can cause errors. For two
417 * reasons:
418 *
419 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
420 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
421 * multiple interfaces.
422 *
423 * 2) The databook does not mention doing more DEPXFERCFG for new
424 * endpoint on alt setting (8.1.6).
425 *
426 * The following simplified method is used instead:
427 *
428 * All hardware endpoints can be assigned a transfer resource and this
429 * setting will stay persistent until either a core reset or
430 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
431 * do DEPXFERCFG for every hardware endpoint as well. We are
432 * guaranteed that there are as many transfer resources as endpoints.
433 *
434 * This function is called for each endpoint when it is being enabled
435 * but is triggered only when called for EP0-out, which always happens
436 * first, and which should only happen in one of the above conditions.
437 */
438 static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
439 {
440 struct dwc3_gadget_ep_cmd_params params;
441 u32 cmd;
442 int i;
443 int ret;
444
445 if (dep->number)
446 return 0;
447
448 memset(&params, 0x00, sizeof(params));
449 cmd = DWC3_DEPCMD_DEPSTARTCFG;
450
451 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
452 if (ret)
453 return ret;
454
455 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
456 struct dwc3_ep *dep = dwc->eps[i];
457
458 if (!dep)
459 continue;
460
461 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
462 if (ret)
463 return ret;
464 }
465
466 return 0;
467 }
468
469 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
470 const struct usb_endpoint_descriptor *desc,
471 const struct usb_ss_ep_comp_descriptor *comp_desc,
472 bool ignore, bool restore)
473 {
474 struct dwc3_gadget_ep_cmd_params params;
475
476 memset(&params, 0x00, sizeof(params));
477
478 params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
479 | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
480
481 /* Burst size is only needed in SuperSpeed mode */
482 if (dwc->gadget.speed == USB_SPEED_SUPER) {
483 u32 burst = dep->endpoint.maxburst - 1;
484
485 params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
486 }
487
488 if (ignore)
489 params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
490
491 if (restore) {
492 params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
493 params.param2 |= dep->saved_state;
494 }
495
496 params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
497 | DWC3_DEPCFG_XFER_NOT_READY_EN;
498
499 if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
500 params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
501 | DWC3_DEPCFG_STREAM_EVENT_EN;
502 dep->stream_capable = true;
503 }
504
505 if (!usb_endpoint_xfer_control(desc))
506 params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
507
508 /*
509 * We are doing 1:1 mapping for endpoints, meaning
510 * Physical Endpoints 2 maps to Logical Endpoint 2 and
511 * so on. We consider the direction bit as part of the physical
512 * endpoint number. So USB endpoint 0x81 is 0x03.
513 */
514 params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
515
516 /*
517 * We must use the lower 16 TX FIFOs even though
518 * HW might have more
519 */
520 if (dep->direction)
521 params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
522
523 if (desc->bInterval) {
524 u8 bInterval_m1;
525
526 /*
527 * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
528 * must be set to 0 when the controller operates in full-speed.
529 */
530 bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
531 if (dwc->gadget.speed == USB_SPEED_FULL)
532 bInterval_m1 = 0;
533
534 if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
535 dwc->gadget.speed == USB_SPEED_FULL)
536 dep->interval = desc->bInterval;
537 else
538 dep->interval = 1 << (desc->bInterval - 1);
539
540 params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
541 }
542
543 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
544 DWC3_DEPCMD_SETEPCONFIG, &params);
545 }
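/*
 * Worked example of the interval programming above (illustrative values):
 * a high-speed interrupt endpoint with desc->bInterval = 4 gets
 * bInterval_m1 = 3 and dep->interval = 1 << (4 - 1) = 8 microframes,
 * whereas the same descriptor on a full-speed link keeps
 * dep->interval = 4 frames and forces bInterval_m1 to 0.
 */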
546
547 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
548 {
549 struct dwc3_gadget_ep_cmd_params params;
550
551 memset(&params, 0x00, sizeof(params));
552
553 params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
554
555 return dwc3_send_gadget_ep_cmd(dwc, dep->number,
556 DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
557 }
558
559 /**
560 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
561 * @dep: endpoint to be initialized
562 * @desc: USB Endpoint Descriptor
563 *
564 * Caller should take care of locking
565 */
566 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
567 const struct usb_endpoint_descriptor *desc,
568 const struct usb_ss_ep_comp_descriptor *comp_desc,
569 bool ignore, bool restore)
570 {
571 struct dwc3 *dwc = dep->dwc;
572 u32 reg;
573 int ret;
574
575 dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
576
577 if (!(dep->flags & DWC3_EP_ENABLED)) {
578 ret = dwc3_gadget_start_config(dwc, dep);
579 if (ret)
580 return ret;
581 }
582
583 ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
584 restore);
585 if (ret)
586 return ret;
587
588 if (!(dep->flags & DWC3_EP_ENABLED)) {
589 struct dwc3_trb *trb_st_hw;
590 struct dwc3_trb *trb_link;
591
592 dep->endpoint.desc = desc;
593 dep->comp_desc = comp_desc;
594 dep->type = usb_endpoint_type(desc);
595 dep->flags |= DWC3_EP_ENABLED;
596
597 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
598 reg |= DWC3_DALEPENA_EP(dep->number);
599 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
600
601 if (!usb_endpoint_xfer_isoc(desc))
602 return 0;
603
604 /* Link TRB for ISOC. The HWO bit is never reset */
605 trb_st_hw = &dep->trb_pool[0];
606
607 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
608 memset(trb_link, 0, sizeof(*trb_link));
609
610 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
611 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
612 trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
613 trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
614 }
615
616 switch (usb_endpoint_type(desc)) {
617 case USB_ENDPOINT_XFER_CONTROL:
618 strlcat(dep->name, "-control", sizeof(dep->name));
619 break;
620 case USB_ENDPOINT_XFER_ISOC:
621 strlcat(dep->name, "-isoc", sizeof(dep->name));
622 break;
623 case USB_ENDPOINT_XFER_BULK:
624 strlcat(dep->name, "-bulk", sizeof(dep->name));
625 break;
626 case USB_ENDPOINT_XFER_INT:
627 strlcat(dep->name, "-int", sizeof(dep->name));
628 break;
629 default:
630 dev_err(dwc->dev, "invalid endpoint transfer type\n");
631 }
632
633 return 0;
634 }
635
636 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
637 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
638 {
639 struct dwc3_request *req;
640
641 if (!list_empty(&dep->req_queued)) {
642 dwc3_stop_active_transfer(dwc, dep->number, true);
643
644 /* - giveback all requests to gadget driver */
645 while (!list_empty(&dep->req_queued)) {
646 req = next_request(&dep->req_queued);
647
648 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
649 }
650 }
651
652 while (!list_empty(&dep->request_list)) {
653 req = next_request(&dep->request_list);
654
655 dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
656 }
657 }
658
659 /**
660 * __dwc3_gadget_ep_disable - Disables a HW endpoint
661 * @dep: the endpoint to disable
662 *
663 * This function also removes requests which are currently processed by the
664 * hardware and those which are not yet scheduled.
665 * Caller should take care of locking.
666 */
667 static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
668 {
669 struct dwc3 *dwc = dep->dwc;
670 u32 reg;
671
672 dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
673
674 dwc3_remove_requests(dwc, dep);
675
676 /* make sure HW endpoint isn't stalled */
677 if (dep->flags & DWC3_EP_STALL)
678 __dwc3_gadget_ep_set_halt(dep, 0, false);
679
680 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
681 reg &= ~DWC3_DALEPENA_EP(dep->number);
682 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
683
684 dep->stream_capable = false;
685 dep->endpoint.desc = NULL;
686 dep->comp_desc = NULL;
687 dep->type = 0;
688 dep->flags = 0;
689
690 snprintf(dep->name, sizeof(dep->name), "ep%d%s",
691 dep->number >> 1,
692 (dep->number & 1) ? "in" : "out");
693
694 return 0;
695 }
696
697 /* -------------------------------------------------------------------------- */
698
699 static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
700 const struct usb_endpoint_descriptor *desc)
701 {
702 return -EINVAL;
703 }
704
705 static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
706 {
707 return -EINVAL;
708 }
709
710 /* -------------------------------------------------------------------------- */
711
712 static int dwc3_gadget_ep_enable(struct usb_ep *ep,
713 const struct usb_endpoint_descriptor *desc)
714 {
715 struct dwc3_ep *dep;
716 struct dwc3 *dwc;
717 unsigned long flags;
718 int ret;
719
720 if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
721 pr_debug("dwc3: invalid parameters\n");
722 return -EINVAL;
723 }
724
725 if (!desc->wMaxPacketSize) {
726 pr_debug("dwc3: missing wMaxPacketSize\n");
727 return -EINVAL;
728 }
729
730 dep = to_dwc3_ep(ep);
731 dwc = dep->dwc;
732
733 if (dep->flags & DWC3_EP_ENABLED) {
734 dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
735 dep->name);
736 return 0;
737 }
738
739 spin_lock_irqsave(&dwc->lock, flags);
740 ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
741 spin_unlock_irqrestore(&dwc->lock, flags);
742
743 return ret;
744 }
745
746 static int dwc3_gadget_ep_disable(struct usb_ep *ep)
747 {
748 struct dwc3_ep *dep;
749 struct dwc3 *dwc;
750 unsigned long flags;
751 int ret;
752
753 if (!ep) {
754 pr_debug("dwc3: invalid parameters\n");
755 return -EINVAL;
756 }
757
758 dep = to_dwc3_ep(ep);
759 dwc = dep->dwc;
760
761 if (!(dep->flags & DWC3_EP_ENABLED)) {
762 dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
763 dep->name);
764 return 0;
765 }
766
767 spin_lock_irqsave(&dwc->lock, flags);
768 ret = __dwc3_gadget_ep_disable(dep);
769 spin_unlock_irqrestore(&dwc->lock, flags);
770
771 return ret;
772 }
773
774 static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
775 gfp_t gfp_flags)
776 {
777 struct dwc3_request *req;
778 struct dwc3_ep *dep = to_dwc3_ep(ep);
779
780 req = kzalloc(sizeof(*req), gfp_flags);
781 if (!req)
782 return NULL;
783
784 req->epnum = dep->number;
785 req->dep = dep;
786
787 trace_dwc3_alloc_request(req);
788
789 return &req->request;
790 }
791
792 static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
793 struct usb_request *request)
794 {
795 struct dwc3_request *req = to_dwc3_request(request);
796
797 trace_dwc3_free_request(req);
798 kfree(req);
799 }
800
801 /**
802 * dwc3_prepare_one_trb - setup one TRB from one request
803 * @dep: endpoint for which this request is prepared
804 * @req: dwc3_request pointer
805 */
806 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
807 struct dwc3_request *req, dma_addr_t dma,
808 unsigned length, unsigned last, unsigned chain, unsigned node)
809 {
810 struct dwc3_trb *trb;
811
812 dwc3_trace(trace_dwc3_gadget, "%s: req %p dma %08llx length %d%s%s",
813 dep->name, req, (unsigned long long) dma,
814 length, last ? " last" : "",
815 chain ? " chain" : "");
816
817
818 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
819
820 if (!req->trb) {
821 dwc3_gadget_move_request_queued(req);
822 req->trb = trb;
823 req->trb_dma = dwc3_trb_dma_offset(dep, trb);
824 req->start_slot = dep->free_slot & DWC3_TRB_MASK;
825 }
826
827 dep->free_slot++;
828 /* Skip the LINK-TRB on ISOC */
829 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
830 usb_endpoint_xfer_isoc(dep->endpoint.desc))
831 dep->free_slot++;
832
833 trb->size = DWC3_TRB_SIZE_LENGTH(length);
834 trb->bpl = lower_32_bits(dma);
835 trb->bph = upper_32_bits(dma);
836
837 switch (usb_endpoint_type(dep->endpoint.desc)) {
838 case USB_ENDPOINT_XFER_CONTROL:
839 trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
840 break;
841
842 case USB_ENDPOINT_XFER_ISOC:
843 if (!node)
844 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
845 else
846 trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
847 break;
848
849 case USB_ENDPOINT_XFER_BULK:
850 case USB_ENDPOINT_XFER_INT:
851 trb->ctrl = DWC3_TRBCTL_NORMAL;
852 break;
853 default:
854 /*
855 * This is only possible with faulty memory because we
856 * checked it already :)
857 */
858 BUG();
859 }
860
861 if (!req->request.no_interrupt && !chain)
862 trb->ctrl |= DWC3_TRB_CTRL_IOC;
863
864 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
865 trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
866 trb->ctrl |= DWC3_TRB_CTRL_CSP;
867 } else if (last) {
868 trb->ctrl |= DWC3_TRB_CTRL_LST;
869 }
870
871 if (chain)
872 trb->ctrl |= DWC3_TRB_CTRL_CHN;
873
874 if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
875 trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);
876
877 trb->ctrl |= DWC3_TRB_CTRL_HWO;
878
879 trace_dwc3_prepare_trb(dep, trb);
880 }
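/*
 * Illustration of the slot accounting above (assuming DWC3_TRB_NUM is 32):
 * for an isochronous endpoint, TRB index 31 is permanently occupied by the
 * link TRB set up in __dwc3_gadget_ep_enable(), so when
 * free_slot & DWC3_TRB_MASK reaches 31 it is bumped once more and the next
 * transfer TRB lands back at index 0 of the ring.
 */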
881
882 /*
883 * dwc3_prepare_trbs - setup TRBs from requests
884 * @dep: endpoint for which requests are being prepared
885 * @starting: true if the endpoint is idle and no requests are queued.
886 *
887 * The function goes through the requests list and sets up TRBs for the
888 * transfers. The function returns once there are no more TRBs available or
889 * it runs out of requests.
890 */
891 static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
892 {
893 struct dwc3_request *req, *n;
894 u32 trbs_left;
895 u32 max;
896 unsigned int last_one = 0;
897
898 BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
899
900 /* the first request must not be queued */
901 trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
902
903 /* Can't wrap around on a non-isoc EP since there's no link TRB */
904 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
905 max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
906 if (trbs_left > max)
907 trbs_left = max;
908 }
909
910 /*
911 * If busy & slot are equal than it is either full or empty. If we are
912 * starting to process requests then we are empty. Otherwise we are
913 * full and don't do anything
914 */
915 if (!trbs_left) {
916 if (!starting)
917 return;
918 trbs_left = DWC3_TRB_NUM;
919 /*
920 * In case we start from scratch, we queue the ISOC requests
921 * starting from slot 1. This is done because we use ring
922 * buffer and have no LST bit to stop us. Instead, we place
923 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
924 * after the first request so we start at slot 1 and have
925 * 7 requests proceed before we hit the first IOC.
926 * Other transfer types don't use the ring buffer and are
927 * processed from the first TRB until the last one. Since we
928 * don't wrap around we have to start at the beginning.
929 */
930 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
931 dep->busy_slot = 1;
932 dep->free_slot = 1;
933 } else {
934 dep->busy_slot = 0;
935 dep->free_slot = 0;
936 }
937 }
938
939 /* The last TRB is a link TRB, not used for xfer */
940 if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
941 return;
942
943 list_for_each_entry_safe(req, n, &dep->request_list, list) {
944 unsigned length;
945 dma_addr_t dma;
946 last_one = false;
947
948 if (req->request.num_mapped_sgs > 0) {
949 struct usb_request *request = &req->request;
950 struct scatterlist *sg = request->sg;
951 struct scatterlist *s;
952 int i;
953
954 for_each_sg(sg, s, request->num_mapped_sgs, i) {
955 unsigned chain = true;
956
957 length = sg_dma_len(s);
958 dma = sg_dma_address(s);
959
960 if (i == (request->num_mapped_sgs - 1) ||
961 sg_is_last(s)) {
962 if (list_empty(&dep->request_list))
963 last_one = true;
964 chain = false;
965 }
966
967 trbs_left--;
968 if (!trbs_left)
969 last_one = true;
970
971 if (last_one)
972 chain = false;
973
974 dwc3_prepare_one_trb(dep, req, dma, length,
975 last_one, chain, i);
976
977 if (last_one)
978 break;
979 }
980
981 if (last_one)
982 break;
983 } else {
984 dma = req->request.dma;
985 length = req->request.length;
986 trbs_left--;
987
988 if (!trbs_left)
989 last_one = 1;
990
991 /* Is this the last request? */
992 if (list_is_last(&req->list, &dep->request_list))
993 last_one = 1;
994
995 dwc3_prepare_one_trb(dep, req, dma, length,
996 last_one, false, 0);
997
998 if (last_one)
999 break;
1000 }
1001 }
1002 }
1003
1004 static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
1005 int start_new)
1006 {
1007 struct dwc3_gadget_ep_cmd_params params;
1008 struct dwc3_request *req;
1009 struct dwc3 *dwc = dep->dwc;
1010 int ret;
1011 u32 cmd;
1012
1013 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
1014 dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
1015 return -EBUSY;
1016 }
1017
1018 /*
1019 * If we are getting here after a short-out-packet we don't enqueue any
1020 * new requests as we try to set the IOC bit only on the last request.
1021 */
1022 if (start_new) {
1023 if (list_empty(&dep->req_queued))
1024 dwc3_prepare_trbs(dep, start_new);
1025
1026 /* req points to the first request which will be sent */
1027 req = next_request(&dep->req_queued);
1028 } else {
1029 dwc3_prepare_trbs(dep, start_new);
1030
1031 /*
1032 * req points to the first request where HWO changed from 0 to 1
1033 */
1034 req = next_request(&dep->req_queued);
1035 }
1036 if (!req) {
1037 dep->flags |= DWC3_EP_PENDING_REQUEST;
1038 return 0;
1039 }
1040
1041 memset(&params, 0, sizeof(params));
1042
1043 if (start_new) {
1044 params.param0 = upper_32_bits(req->trb_dma);
1045 params.param1 = lower_32_bits(req->trb_dma);
1046 cmd = DWC3_DEPCMD_STARTTRANSFER;
1047 } else {
1048 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1049 }
1050
1051 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
1052 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
1053 if (ret < 0) {
1054 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
1055
1056 /*
1057 * FIXME we need to iterate over the list of requests
1058 * here and stop, unmap, free and del each of the linked
1059 * requests instead of what we do now.
1060 */
1061 usb_gadget_unmap_request(&dwc->gadget, &req->request,
1062 req->direction);
1063 list_del(&req->list);
1064 return ret;
1065 }
1066
1067 dep->flags |= DWC3_EP_BUSY;
1068
1069 if (start_new) {
1070 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
1071 dep->number);
1072 WARN_ON_ONCE(!dep->resource_index);
1073 }
1074
1075 return 0;
1076 }
1077
1078 static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1079 struct dwc3_ep *dep, u32 cur_uf)
1080 {
1081 u32 uf;
1082
1083 if (list_empty(&dep->request_list)) {
1084 dwc3_trace(trace_dwc3_gadget,
1085 "ISOC ep %s run out for requests",
1086 dep->name);
1087 dep->flags |= DWC3_EP_PENDING_REQUEST;
1088 return;
1089 }
1090
1091 /* 4 micro frames in the future */
1092 uf = cur_uf + dep->interval * 4;
1093
1094 __dwc3_gadget_kick_transfer(dep, uf, 1);
1095 }
1096
1097 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1098 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1099 {
1100 u32 cur_uf, mask;
1101
1102 mask = ~(dep->interval - 1);
1103 cur_uf = event->parameters & mask;
1104
1105 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1106 }
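/*
 * Worked example for the microframe math above (values are made up): with
 * dep->interval = 8 and event->parameters reporting microframe 0x3457,
 * mask = ~(8 - 1) aligns it down to cur_uf = 0x3450, and the transfer is
 * scheduled at uf = 0x3450 + 8 * 4 = 0x3470, i.e. four intervals in the
 * future as the comment in __dwc3_gadget_start_isoc() says.
 */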
1107
1108 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1109 {
1110 struct dwc3 *dwc = dep->dwc;
1111 int ret;
1112
1113 req->request.actual = 0;
1114 req->request.status = -EINPROGRESS;
1115 req->direction = dep->direction;
1116 req->epnum = dep->number;
1117
1118 trace_dwc3_ep_queue(req);
1119
1120 /*
1121 * We only add to our list of requests now and
1122 * start consuming the list once we get XferNotReady
1123 * IRQ.
1124 *
1125 * That way, we avoid doing anything that we don't need
1126 * to do now and defer it until the point we receive a
1127 * particular token from the Host side.
1128 *
1129 * This will also avoid Host cancelling URBs due to too
1130 * many NAKs.
1131 */
1132 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1133 dep->direction);
1134 if (ret)
1135 return ret;
1136
1137 list_add_tail(&req->list, &dep->request_list);
1138
1139 /*
1140 * If there are no pending requests and the endpoint isn't already
1141 * busy, we will just start the request straight away.
1142 *
1143 * This will save one IRQ (XFER_NOT_READY) and possibly make it a
1144 * little bit faster.
1145 */
1146 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1147 !usb_endpoint_xfer_int(dep->endpoint.desc) &&
1148 !(dep->flags & DWC3_EP_BUSY)) {
1149 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1150 goto out;
1151 }
1152
1153 /*
1154 * There are a few special cases:
1155 *
1156 * 1. XferNotReady with empty list of requests. We need to kick the
1157 * transfer here in that situation, otherwise we will be NAKing
1158 * forever. If we get XferNotReady before gadget driver has a
1159 * chance to queue a request, we will ACK the IRQ but won't be
1160 * able to receive the data until the next request is queued.
1161 * The following code is handling exactly that.
1162 *
1163 */
1164 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
1165 /*
1166 * If xfernotready is already elapsed and it is a case
1167 * of isoc transfer, then issue END TRANSFER, so that
1168 * you can receive xfernotready again and can have
1169 * notion of current microframe.
1170 */
1171 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1172 if (list_empty(&dep->req_queued)) {
1173 dwc3_stop_active_transfer(dwc, dep->number, true);
1174 dep->flags = DWC3_EP_ENABLED;
1175 }
1176 return 0;
1177 }
1178
1179 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1180 if (!ret)
1181 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
1182
1183 goto out;
1184 }
1185
1186 /*
1187 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1188 * kick the transfer here after queuing a request, otherwise the
1189 * core may not see the modified TRB(s).
1190 */
1191 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1192 (dep->flags & DWC3_EP_BUSY) &&
1193 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
1194 WARN_ON_ONCE(!dep->resource_index);
1195 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
1196 false);
1197 goto out;
1198 }
1199
1200 /*
1201 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1202 * right away, otherwise host will not know we have streams to be
1203 * handled.
1204 */
1205 if (dep->stream_capable)
1206 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1207
1208 out:
1209 if (ret && ret != -EBUSY)
1210 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1211 dep->name);
1212 if (ret == -EBUSY)
1213 ret = 0;
1214
1215 return ret;
1216 }
1217
1218 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
1219 struct usb_request *request)
1220 {
1221 dwc3_gadget_ep_free_request(ep, request);
1222 }
1223
1224 static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
1225 {
1226 struct dwc3_request *req;
1227 struct usb_request *request;
1228 struct usb_ep *ep = &dep->endpoint;
1229
1230 dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
1231 request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
1232 if (!request)
1233 return -ENOMEM;
1234
1235 request->length = 0;
1236 request->buf = dwc->zlp_buf;
1237 request->complete = __dwc3_gadget_ep_zlp_complete;
1238
1239 req = to_dwc3_request(request);
1240
1241 return __dwc3_gadget_ep_queue(dep, req);
1242 }
1243
1244 static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1245 gfp_t gfp_flags)
1246 {
1247 struct dwc3_request *req = to_dwc3_request(request);
1248 struct dwc3_ep *dep = to_dwc3_ep(ep);
1249 struct dwc3 *dwc = dep->dwc;
1250
1251 unsigned long flags;
1252
1253 int ret;
1254
1255 spin_lock_irqsave(&dwc->lock, flags);
1256 if (!dep->endpoint.desc) {
1257 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1258 request, ep->name);
1259 ret = -ESHUTDOWN;
1260 goto out;
1261 }
1262
1263 if (WARN(req->dep != dep, "request %pK belongs to '%s'\n",
1264 request, req->dep->name)) {
1265 ret = -EINVAL;
1266 goto out;
1267 }
1268
1269 ret = __dwc3_gadget_ep_queue(dep, req);
1270
1271 /*
1272 * Okay, here's the thing, if gadget driver has requested for a ZLP by
1273 * setting request->zero, instead of doing magic, we will just queue an
1274 * extra usb_request ourselves so that it gets handled the same way as
1275 * any other request.
1276 */
1277 if (ret == 0 && request->zero && request->length &&
1278 (request->length % ep->maxpacket == 0))
1279 ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
1280
1281 out:
1282 spin_unlock_irqrestore(&dwc->lock, flags);
1283
1284 return ret;
1285 }
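/*
 * Example of the ZLP handling above (hypothetical numbers): a gadget driver
 * queueing a 1024-byte request with request->zero set on a bulk endpoint
 * whose maxpacket is 512 ends the transfer exactly on a packet boundary
 * (1024 % 512 == 0), so an extra zero-length request backed by dwc->zlp_buf
 * is queued behind it and completes through __dwc3_gadget_ep_zlp_complete().
 */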
1286
1287 static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1288 struct usb_request *request)
1289 {
1290 struct dwc3_request *req = to_dwc3_request(request);
1291 struct dwc3_request *r = NULL;
1292
1293 struct dwc3_ep *dep = to_dwc3_ep(ep);
1294 struct dwc3 *dwc = dep->dwc;
1295
1296 unsigned long flags;
1297 int ret = 0;
1298
1299 trace_dwc3_ep_dequeue(req);
1300
1301 spin_lock_irqsave(&dwc->lock, flags);
1302
1303 list_for_each_entry(r, &dep->request_list, list) {
1304 if (r == req)
1305 break;
1306 }
1307
1308 if (r != req) {
1309 list_for_each_entry(r, &dep->req_queued, list) {
1310 if (r == req)
1311 break;
1312 }
1313 if (r == req) {
1314 /* wait until it is processed */
1315 dwc3_stop_active_transfer(dwc, dep->number, true);
1316 goto out1;
1317 }
1318 dev_err(dwc->dev, "request %pK was not queued to %s\n",
1319 request, ep->name);
1320 ret = -EINVAL;
1321 goto out0;
1322 }
1323
1324 out1:
1325 /* giveback the request */
1326 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1327
1328 out0:
1329 spin_unlock_irqrestore(&dwc->lock, flags);
1330
1331 return ret;
1332 }
1333
1334 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1335 {
1336 struct dwc3_gadget_ep_cmd_params params;
1337 struct dwc3 *dwc = dep->dwc;
1338 int ret;
1339
1340 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1341 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1342 return -EINVAL;
1343 }
1344
1345 memset(&params, 0x00, sizeof(params));
1346
1347 if (value) {
1348 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1349 (!list_empty(&dep->req_queued) ||
1350 !list_empty(&dep->request_list)))) {
1351 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1352 dep->name);
1353 return -EAGAIN;
1354 }
1355
1356 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1357 DWC3_DEPCMD_SETSTALL, &params);
1358 if (ret)
1359 dev_err(dwc->dev, "failed to set STALL on %s\n",
1360 dep->name);
1361 else
1362 dep->flags |= DWC3_EP_STALL;
1363 } else {
1364 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1365 DWC3_DEPCMD_CLEARSTALL, &params);
1366 if (ret)
1367 dev_err(dwc->dev, "failed to clear STALL on %s\n",
1368 dep->name);
1369 else
1370 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
1371 }
1372
1373 return ret;
1374 }
1375
1376 static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1377 {
1378 struct dwc3_ep *dep = to_dwc3_ep(ep);
1379 struct dwc3 *dwc = dep->dwc;
1380
1381 unsigned long flags;
1382
1383 int ret;
1384
1385 spin_lock_irqsave(&dwc->lock, flags);
1386 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1387 spin_unlock_irqrestore(&dwc->lock, flags);
1388
1389 return ret;
1390 }
1391
1392 static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1393 {
1394 struct dwc3_ep *dep = to_dwc3_ep(ep);
1395 struct dwc3 *dwc = dep->dwc;
1396 unsigned long flags;
1397 int ret;
1398
1399 spin_lock_irqsave(&dwc->lock, flags);
1400 dep->flags |= DWC3_EP_WEDGE;
1401
1402 if (dep->number == 0 || dep->number == 1)
1403 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1404 else
1405 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1406 spin_unlock_irqrestore(&dwc->lock, flags);
1407
1408 return ret;
1409 }
1410
1411 /* -------------------------------------------------------------------------- */
1412
1413 static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1414 .bLength = USB_DT_ENDPOINT_SIZE,
1415 .bDescriptorType = USB_DT_ENDPOINT,
1416 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1417 };
1418
1419 static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1420 .enable = dwc3_gadget_ep0_enable,
1421 .disable = dwc3_gadget_ep0_disable,
1422 .alloc_request = dwc3_gadget_ep_alloc_request,
1423 .free_request = dwc3_gadget_ep_free_request,
1424 .queue = dwc3_gadget_ep0_queue,
1425 .dequeue = dwc3_gadget_ep_dequeue,
1426 .set_halt = dwc3_gadget_ep0_set_halt,
1427 .set_wedge = dwc3_gadget_ep_set_wedge,
1428 };
1429
1430 static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1431 .enable = dwc3_gadget_ep_enable,
1432 .disable = dwc3_gadget_ep_disable,
1433 .alloc_request = dwc3_gadget_ep_alloc_request,
1434 .free_request = dwc3_gadget_ep_free_request,
1435 .queue = dwc3_gadget_ep_queue,
1436 .dequeue = dwc3_gadget_ep_dequeue,
1437 .set_halt = dwc3_gadget_ep_set_halt,
1438 .set_wedge = dwc3_gadget_ep_set_wedge,
1439 };
1440
1441 /* -------------------------------------------------------------------------- */
1442
1443 static int dwc3_gadget_get_frame(struct usb_gadget *g)
1444 {
1445 struct dwc3 *dwc = gadget_to_dwc(g);
1446 u32 reg;
1447
1448 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1449 return DWC3_DSTS_SOFFN(reg);
1450 }
1451
1452 static int dwc3_gadget_wakeup(struct usb_gadget *g)
1453 {
1454 struct dwc3 *dwc = gadget_to_dwc(g);
1455
1456 unsigned long timeout;
1457 unsigned long flags;
1458
1459 u32 reg;
1460
1461 int ret = 0;
1462
1463 u8 link_state;
1464 u8 speed;
1465
1466 spin_lock_irqsave(&dwc->lock, flags);
1467
1468 /*
1469 * According to the Databook, a remote wakeup request should
1470 * be issued only when the device is in early suspend state.
1471 *
1472 * We can check that via USB Link State bits in DSTS register.
1473 */
1474 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1475
1476 speed = reg & DWC3_DSTS_CONNECTSPD;
1477 if (speed == DWC3_DSTS_SUPERSPEED) {
1478 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1479 ret = -EINVAL;
1480 goto out;
1481 }
1482
1483 link_state = DWC3_DSTS_USBLNKST(reg);
1484
1485 switch (link_state) {
1486 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1487 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1488 break;
1489 default:
1490 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1491 link_state);
1492 ret = -EINVAL;
1493 goto out;
1494 }
1495
1496 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1497 if (ret < 0) {
1498 dev_err(dwc->dev, "failed to put link in Recovery\n");
1499 goto out;
1500 }
1501
1502 /* Recent versions do this automatically */
1503 if (dwc->revision < DWC3_REVISION_194A) {
1504 /* write zeroes to Link Change Request */
1505 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1506 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1507 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1508 }
1509
1510 /* poll until Link State changes to ON */
1511 timeout = jiffies + msecs_to_jiffies(100);
1512
1513 while (!time_after(jiffies, timeout)) {
1514 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1515
1516 /* in HS, means ON */
1517 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1518 break;
1519 }
1520
1521 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1522 dev_err(dwc->dev, "failed to send remote wakeup\n");
1523 ret = -EINVAL;
1524 }
1525
1526 out:
1527 spin_unlock_irqrestore(&dwc->lock, flags);
1528
1529 return ret;
1530 }
1531
1532 static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1533 int is_selfpowered)
1534 {
1535 struct dwc3 *dwc = gadget_to_dwc(g);
1536 unsigned long flags;
1537
1538 spin_lock_irqsave(&dwc->lock, flags);
1539 g->is_selfpowered = !!is_selfpowered;
1540 spin_unlock_irqrestore(&dwc->lock, flags);
1541
1542 return 0;
1543 }
1544
1545 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
1546 {
1547 u32 reg;
1548 u32 timeout = 500;
1549
1550 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1551 if (is_on) {
1552 if (dwc->revision <= DWC3_REVISION_187A) {
1553 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1554 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1555 }
1556
1557 if (dwc->revision >= DWC3_REVISION_194A)
1558 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1559 reg |= DWC3_DCTL_RUN_STOP;
1560
1561 if (dwc->has_hibernation)
1562 reg |= DWC3_DCTL_KEEP_CONNECT;
1563
1564 dwc->pullups_connected = true;
1565 } else {
1566 reg &= ~DWC3_DCTL_RUN_STOP;
1567
1568 if (dwc->has_hibernation && !suspend)
1569 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1570
1571 dwc->pullups_connected = false;
1572 }
1573
1574 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1575
1576 do {
1577 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1578 if (is_on) {
1579 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1580 break;
1581 } else {
1582 if (reg & DWC3_DSTS_DEVCTRLHLT)
1583 break;
1584 }
1585 timeout--;
1586 if (!timeout)
1587 return -ETIMEDOUT;
1588 udelay(1);
1589 } while (1);
1590
1591 dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
1592 dwc->gadget_driver
1593 ? dwc->gadget_driver->function : "no-function",
1594 is_on ? "connect" : "disconnect");
1595
1596 return 0;
1597 }
1598
1599 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1600 {
1601 struct dwc3 *dwc = gadget_to_dwc(g);
1602 unsigned long flags;
1603 int ret;
1604
1605 is_on = !!is_on;
1606
1607 spin_lock_irqsave(&dwc->lock, flags);
1608 ret = dwc3_gadget_run_stop(dwc, is_on, false);
1609 spin_unlock_irqrestore(&dwc->lock, flags);
1610
1611 return ret;
1612 }
1613
1614 static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1615 {
1616 u32 reg;
1617
1618 /* Enable all but Start and End of Frame IRQs */
1619 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1620 DWC3_DEVTEN_EVNTOVERFLOWEN |
1621 DWC3_DEVTEN_CMDCMPLTEN |
1622 DWC3_DEVTEN_ERRTICERREN |
1623 DWC3_DEVTEN_WKUPEVTEN |
1624 DWC3_DEVTEN_ULSTCNGEN |
1625 DWC3_DEVTEN_CONNECTDONEEN |
1626 DWC3_DEVTEN_USBRSTEN |
1627 DWC3_DEVTEN_DISCONNEVTEN);
1628
1629 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1630 }
1631
1632 static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1633 {
1634 /* mask all interrupts */
1635 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1636 }
1637
1638 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
1639 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
1640
1641 static int dwc3_gadget_start(struct usb_gadget *g,
1642 struct usb_gadget_driver *driver)
1643 {
1644 struct dwc3 *dwc = gadget_to_dwc(g);
1645 struct dwc3_ep *dep;
1646 unsigned long flags;
1647 int ret = 0;
1648 int irq;
1649 u32 reg;
1650
1651 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1652 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
1653 IRQF_SHARED, "dwc3", dwc);
1654 if (ret) {
1655 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1656 irq, ret);
1657 goto err0;
1658 }
1659
1660 spin_lock_irqsave(&dwc->lock, flags);
1661
1662 if (dwc->gadget_driver) {
1663 dev_err(dwc->dev, "%s is already bound to %s\n",
1664 dwc->gadget.name,
1665 dwc->gadget_driver->driver.name);
1666 ret = -EBUSY;
1667 goto err1;
1668 }
1669
1670 dwc->gadget_driver = driver;
1671
1672 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1673 reg &= ~(DWC3_DCFG_SPEED_MASK);
1674
1675 /**
1676 * WORKAROUND: DWC3 revisions < 2.20a have an issue
1677 * which would cause metastability state on Run/Stop
1678 * bit if we try to force the IP to USB2-only mode.
1679 *
1680 * Because of that, we cannot configure the IP to any
1681 * speed other than SuperSpeed.
1682 *
1683 * Refers to:
1684 *
1685 * STAR#9000525659: Clock Domain Crossing on DCTL in
1686 * USB 2.0 Mode
1687 */
1688 if (dwc->revision < DWC3_REVISION_220A) {
1689 reg |= DWC3_DCFG_SUPERSPEED;
1690 } else {
1691 switch (dwc->maximum_speed) {
1692 case USB_SPEED_LOW:
1693 reg |= DWC3_DSTS_LOWSPEED;
1694 break;
1695 case USB_SPEED_FULL:
1696 reg |= DWC3_DSTS_FULLSPEED1;
1697 break;
1698 case USB_SPEED_HIGH:
1699 reg |= DWC3_DSTS_HIGHSPEED;
1700 break;
1701 case USB_SPEED_SUPER: /* FALLTHROUGH */
1702 case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
1703 default:
1704 reg |= DWC3_DSTS_SUPERSPEED;
1705 }
1706 }
1707 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1708
1709 /* Start with SuperSpeed Default */
1710 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1711
1712 dep = dwc->eps[0];
1713 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1714 false);
1715 if (ret) {
1716 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1717 goto err2;
1718 }
1719
1720 dep = dwc->eps[1];
1721 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1722 false);
1723 if (ret) {
1724 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
1725 goto err3;
1726 }
1727
1728 /* begin to receive SETUP packets */
1729 dwc->ep0state = EP0_SETUP_PHASE;
1730 dwc->link_state = DWC3_LINK_STATE_SS_DIS;
1731 dwc3_ep0_out_start(dwc);
1732
1733 dwc3_gadget_enable_irq(dwc);
1734
1735 spin_unlock_irqrestore(&dwc->lock, flags);
1736
1737 return 0;
1738
1739 err3:
1740 __dwc3_gadget_ep_disable(dwc->eps[0]);
1741
1742 err2:
1743 dwc->gadget_driver = NULL;
1744
1745 err1:
1746 spin_unlock_irqrestore(&dwc->lock, flags);
1747
1748 free_irq(irq, dwc);
1749
1750 err0:
1751 return ret;
1752 }
1753
1754 static int dwc3_gadget_stop(struct usb_gadget *g)
1755 {
1756 struct dwc3 *dwc = gadget_to_dwc(g);
1757 unsigned long flags;
1758 int irq;
1759
1760 spin_lock_irqsave(&dwc->lock, flags);
1761
1762 dwc3_gadget_disable_irq(dwc);
1763 __dwc3_gadget_ep_disable(dwc->eps[0]);
1764 __dwc3_gadget_ep_disable(dwc->eps[1]);
1765
1766 dwc->gadget_driver = NULL;
1767
1768 spin_unlock_irqrestore(&dwc->lock, flags);
1769
1770 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1771 free_irq(irq, dwc);
1772
1773 return 0;
1774 }
1775
1776 static const struct usb_gadget_ops dwc3_gadget_ops = {
1777 .get_frame = dwc3_gadget_get_frame,
1778 .wakeup = dwc3_gadget_wakeup,
1779 .set_selfpowered = dwc3_gadget_set_selfpowered,
1780 .pullup = dwc3_gadget_pullup,
1781 .udc_start = dwc3_gadget_start,
1782 .udc_stop = dwc3_gadget_stop,
1783 };
1784
1785 /* -------------------------------------------------------------------------- */
1786
1787 static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1788 u8 num, u32 direction)
1789 {
1790 struct dwc3_ep *dep;
1791 u8 i;
1792
1793 for (i = 0; i < num; i++) {
1794 u8 epnum = (i << 1) | (!!direction);
1795
1796 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
1797 if (!dep)
1798 return -ENOMEM;
1799
1800 dep->dwc = dwc;
1801 dep->number = epnum;
1802 dep->direction = !!direction;
1803 dwc->eps[epnum] = dep;
1804
1805 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1806 (epnum & 1) ? "in" : "out");
1807
1808 dep->endpoint.name = dep->name;
1809
1810 dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
1811
1812 if (epnum == 0 || epnum == 1) {
1813 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
1814 dep->endpoint.maxburst = 1;
1815 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1816 if (!epnum)
1817 dwc->gadget.ep0 = &dep->endpoint;
1818 } else {
1819 int ret;
1820
1821 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
1822 dep->endpoint.max_streams = 15;
1823 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1824 list_add_tail(&dep->endpoint.ep_list,
1825 &dwc->gadget.ep_list);
1826
1827 ret = dwc3_alloc_trb_pool(dep);
1828 if (ret)
1829 return ret;
1830 }
1831
1832 if (epnum == 0 || epnum == 1) {
1833 dep->endpoint.caps.type_control = true;
1834 } else {
1835 dep->endpoint.caps.type_iso = true;
1836 dep->endpoint.caps.type_bulk = true;
1837 dep->endpoint.caps.type_int = true;
1838 }
1839
1840 dep->endpoint.caps.dir_in = !!direction;
1841 dep->endpoint.caps.dir_out = !direction;
1842
1843 INIT_LIST_HEAD(&dep->request_list);
1844 INIT_LIST_HEAD(&dep->req_queued);
1845 }
1846
1847 return 0;
1848 }
1849
1850 static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1851 {
1852 int ret;
1853
1854 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1855
1856 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1857 if (ret < 0) {
1858 dwc3_trace(trace_dwc3_gadget,
1859 "failed to allocate OUT endpoints");
1860 return ret;
1861 }
1862
1863 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1864 if (ret < 0) {
1865 dwc3_trace(trace_dwc3_gadget,
1866 "failed to allocate IN endpoints");
1867 return ret;
1868 }
1869
1870 return 0;
1871 }
1872
1873 static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1874 {
1875 struct dwc3_ep *dep;
1876 u8 epnum;
1877
1878 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1879 dep = dwc->eps[epnum];
1880 if (!dep)
1881 continue;
1882 /*
1883 * Physical endpoints 0 and 1 are special; they form the
1884 * bi-directional USB endpoint 0.
1885 *
1886 * For those two physical endpoints, we don't allocate a TRB
1887 * pool nor do we add them to the endpoints list. Due to that, we
1888 * shouldn't do these two operations otherwise we would end up
1889 * with all sorts of bugs when removing dwc3.ko.
1890 */
1891 if (epnum != 0 && epnum != 1) {
1892 dwc3_free_trb_pool(dep);
1893 list_del(&dep->endpoint.ep_list);
1894 }
1895
1896 kfree(dep);
1897 }
1898 }
1899
1900 /* -------------------------------------------------------------------------- */
1901
1902 static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1903 struct dwc3_request *req, struct dwc3_trb *trb,
1904 const struct dwc3_event_depevt *event, int status)
1905 {
1906 unsigned int count;
1907 unsigned int s_pkt = 0;
1908 unsigned int trb_status;
1909
1910 trace_dwc3_complete_trb(dep, trb);
1911
1912 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1913 /*
1914 * We continue despite the error. There is not much we
1915 * can do. If we don't clean it up we loop forever. If
1916 * we skip the TRB then it gets overwritten after a
1917 * while since we use them in a ring buffer. A BUG()
1918 * would help. Let's hope that if this occurs, someone
1919 * fixes the root cause instead of looking away :)
1920 */
1921 dev_err(dwc->dev, "%s's TRB (%pK) still owned by HW\n",
1922 dep->name, trb);
1923 count = trb->size & DWC3_TRB_SIZE_MASK;
1924
1925 if (dep->direction) {
1926 if (count) {
1927 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1928 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1929 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1930 dep->name);
1931 /*
1932 * If a missed isoc occurred and there is
1933 * no request queued, then issue END
1934 * TRANSFER, so that the core generates the
1935 * next xfernotready and we will issue
1936 * a fresh START TRANSFER.
1937 * If there are still queued requests,
1938 * then wait; do not issue either END
1939 * or UPDATE TRANSFER, just attach the next
1940 * request in request_list during
1941 * giveback. If any future queued request
1942 * is successfully transferred, then we
1943 * will issue UPDATE TRANSFER for all
1944 * requests in the request_list.
1945 */
1946 dep->flags |= DWC3_EP_MISSED_ISOC;
1947 } else {
1948 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1949 dep->name);
1950 status = -ECONNRESET;
1951 }
1952 } else {
1953 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1954 }
1955 } else {
1956 if (count && (event->status & DEPEVT_STATUS_SHORT))
1957 s_pkt = 1;
1958 }
1959
1960 if (s_pkt)
1961 return 1;
1962 if ((event->status & DEPEVT_STATUS_LST) &&
1963 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1964 DWC3_TRB_CTRL_HWO)))
1965 return 1;
1966 if ((event->status & DEPEVT_STATUS_IOC) &&
1967 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1968 return 1;
1969 return 0;
1970 }
1971
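/*
 * dwc3_cleanup_done_reqs - give back requests whose TRBs completed
 *
 * Walks the queued request list, accumulating the residual byte count
 * from the TRB of every mapped scatter-gather entry (for isochronous
 * endpoints the last ring slot is skipped, as it holds the link TRB),
 * computes request.actual and hands each request back to the gadget
 * driver. The return value tells the caller whether the endpoint's
 * BUSY flag may be cleared.
 */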
1972 static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1973 const struct dwc3_event_depevt *event, int status)
1974 {
1975 struct dwc3_request *req;
1976 struct dwc3_trb *trb;
1977 unsigned int slot;
1978 unsigned int i;
1979 int count = 0;
1980 int ret;
1981
1982 do {
1983 req = next_request(&dep->req_queued);
1984 if (!req) {
1985 WARN_ON_ONCE(1);
1986 return 1;
1987 }
1988 i = 0;
1989 do {
1990 slot = req->start_slot + i;
1991 if ((slot == DWC3_TRB_NUM - 1) &&
1992 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1993 slot++;
1994 slot %= DWC3_TRB_NUM;
1995 trb = &dep->trb_pool[slot];
1996 count += trb->size & DWC3_TRB_SIZE_MASK;
1997
1998
1999 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
2000 event, status);
2001 if (ret)
2002 break;
2003 } while (++i < req->request.num_mapped_sgs);
2004
2005 /*
2006 * We assume here that we will always receive the entire data block
2007 * we are supposed to receive. Meaning, if we program RX to
2008 * receive 4K but we receive only 2K, we assume that's all we
2009 * should receive and we simply bounce the request back to the
2010 * gadget driver for further processing.
2011 */
2012 req->request.actual += req->request.length - count;
2013 dwc3_gadget_giveback(dep, req, status);
2014
2015 if (ret)
2016 break;
2017 } while (1);
2018
2019 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
2020 list_empty(&dep->req_queued)) {
2021 if (list_empty(&dep->request_list)) {
2022 /*
2023 * If there is no entry in request list then do
2024 * not issue END TRANSFER now. Just set PENDING
2025 * flag, so that END TRANSFER is issued when an
2026 * entry is added into request list.
2027 */
2028 dep->flags = DWC3_EP_PENDING_REQUEST;
2029 } else {
2030 dwc3_stop_active_transfer(dwc, dep->number, true);
2031 dep->flags = DWC3_EP_ENABLED;
2032 }
2033 return 1;
2034 }
2035
2036 if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
2037 if ((event->status & DEPEVT_STATUS_IOC) &&
2038 (trb->ctrl & DWC3_TRB_CTRL_IOC))
2039 return 0;
2040 return 1;
2041 }
2042
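/*
 * dwc3_endpoint_transfer_complete - handle XferComplete/XferInProgress
 *
 * Gives back completed requests, clears the endpoint BUSY flag when
 * appropriate, re-enables U1/U2 once no enabled endpoint has queued
 * requests (second half of the <1.83a workaround) and kicks the next
 * transfer on non-isochronous endpoints.
 */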
2043 static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
2044 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
2045 {
2046 unsigned status = 0;
2047 int clean_busy;
2048 u32 is_xfer_complete;
2049
2050 is_xfer_complete = (event->endpoint_event == DWC3_DEPEVT_XFERCOMPLETE);
2051
2052 if (event->status & DEPEVT_STATUS_BUSERR)
2053 status = -ECONNRESET;
2054
2055 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
2056 if (clean_busy && (is_xfer_complete ||
2057 usb_endpoint_xfer_isoc(dep->endpoint.desc)))
2058 dep->flags &= ~DWC3_EP_BUSY;
2059
2060 /*
2061 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
2062 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
2063 */
2064 if (dwc->revision < DWC3_REVISION_183A) {
2065 u32 reg;
2066 int i;
2067
2068 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
2069 dep = dwc->eps[i];
2070
2071 if (!(dep->flags & DWC3_EP_ENABLED))
2072 continue;
2073
2074 if (!list_empty(&dep->req_queued))
2075 return;
2076 }
2077
2078 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2079 reg |= dwc->u1u2;
2080 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2081
2082 dwc->u1u2 = 0;
2083 }
2084
2085 if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2086 int ret;
2087
2088 ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
2089 if (!ret || ret == -EBUSY)
2090 return;
2091 }
2092 }
2093
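/*
 * dwc3_endpoint_interrupt - dispatch endpoint events
 *
 * Events for physical endpoints 0 and 1 are routed to the EP0 state
 * machine; everything else is handled here based on the endpoint
 * event type (transfer complete/in progress, transfer not ready,
 * stream, FIFO and command-complete events).
 */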
2094 static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
2095 const struct dwc3_event_depevt *event)
2096 {
2097 struct dwc3_ep *dep;
2098 u8 epnum = event->endpoint_number;
2099
2100 dep = dwc->eps[epnum];
2101
2102 if (!(dep->flags & DWC3_EP_ENABLED))
2103 return;
2104
2105 if (epnum == 0 || epnum == 1) {
2106 dwc3_ep0_interrupt(dwc, event);
2107 return;
2108 }
2109
2110 switch (event->endpoint_event) {
2111 case DWC3_DEPEVT_XFERCOMPLETE:
2112 dep->resource_index = 0;
2113
2114 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2115 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
2116 dep->name);
2117 return;
2118 }
2119
2120 dwc3_endpoint_transfer_complete(dwc, dep, event);
2121 break;
2122 case DWC3_DEPEVT_XFERINPROGRESS:
2123 dwc3_endpoint_transfer_complete(dwc, dep, event);
2124 break;
2125 case DWC3_DEPEVT_XFERNOTREADY:
2126 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
2127 dwc3_gadget_start_isoc(dwc, dep, event);
2128 } else {
2129 int active;
2130 int ret;
2131
2132 active = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
2133
2134 dwc3_trace(trace_dwc3_gadget, "%s: reason %s",
2135 dep->name, active ? "Transfer Active"
2136 : "Transfer Not Active");
2137
2138 ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
2139 if (!ret || ret == -EBUSY)
2140 return;
2141
2142 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
2143 dep->name);
2144 }
2145
2146 break;
2147 case DWC3_DEPEVT_STREAMEVT:
2148 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
2149 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
2150 dep->name);
2151 return;
2152 }
2153
2154 switch (event->status) {
2155 case DEPEVT_STREAMEVT_FOUND:
2156 dwc3_trace(trace_dwc3_gadget,
2157 "Stream %d found and started",
2158 event->parameters);
2159
2160 break;
2161 case DEPEVT_STREAMEVT_NOTFOUND:
2162 /* FALLTHROUGH */
2163 default:
2164 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2165 }
2166 break;
2167 case DWC3_DEPEVT_RXTXFIFOEVT:
2168 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2169 break;
2170 case DWC3_DEPEVT_EPCMDCMPLT:
2171 dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
2172 break;
2173 }
2174 }
2175
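/*
 * The helpers below invoke the gadget driver's callbacks with
 * dwc->lock released, presumably because those callbacks may call
 * back into dwc3 (e.g. to queue or dequeue requests) and would
 * otherwise deadlock on the same lock.
 */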
2176 static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2177 {
2178 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2179 spin_unlock(&dwc->lock);
2180 dwc->gadget_driver->disconnect(&dwc->gadget);
2181 spin_lock(&dwc->lock);
2182 }
2183 }
2184
2185 static void dwc3_suspend_gadget(struct dwc3 *dwc)
2186 {
2187 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
2188 spin_unlock(&dwc->lock);
2189 dwc->gadget_driver->suspend(&dwc->gadget);
2190 spin_lock(&dwc->lock);
2191 }
2192 }
2193
2194 static void dwc3_resume_gadget(struct dwc3 *dwc)
2195 {
2196 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
2197 spin_unlock(&dwc->lock);
2198 dwc->gadget_driver->resume(&dwc->gadget);
2199 spin_lock(&dwc->lock);
2200 }
2201 }
2202
2203 static void dwc3_reset_gadget(struct dwc3 *dwc)
2204 {
2205 if (!dwc->gadget_driver)
2206 return;
2207
2208 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2209 spin_unlock(&dwc->lock);
2210 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
2211 spin_lock(&dwc->lock);
2212 }
2213 }
2214
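/*
 * dwc3_stop_active_transfer - issue an End Transfer command
 *
 * Sends DEPCMD_ENDTRANSFER (optionally with ForceRM) for the
 * endpoint's active transfer resource, clears the resource index and
 * the BUSY flag, then waits 100us; see the NOTICE below for why we
 * don't wait for the command-completion interrupt.
 */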
2215 static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
2216 {
2217 struct dwc3_ep *dep;
2218 struct dwc3_gadget_ep_cmd_params params;
2219 u32 cmd;
2220 int ret;
2221
2222 dep = dwc->eps[epnum];
2223
2224 if (!dep->resource_index)
2225 return;
2226
2227 /*
2228 * NOTICE: We are violating what the Databook says about the
2229 * EndTransfer command. Ideally we would _always_ wait for the
2230 * EndTransfer Command Completion IRQ, but that's causing too
2231 * much trouble synchronizing between us and gadget driver.
2232 *
2233 * We have discussed this with the IP Provider and it was
2234 * suggested to giveback all requests here, but give HW some
2235 * extra time to synchronize with the interconnect. We're using
2236 * an arbitrary 100us delay for that.
2237 *
2238 * Note also that a similar handling was tested by Synopsys
2239 * (thanks a lot Paul) and nothing bad has come out of it.
2240 * In short, what we're doing is:
2241 *
2242 * - Issue EndTransfer WITH CMDIOC bit set
2243 * - Wait 100us
2244 */
2245
2246 cmd = DWC3_DEPCMD_ENDTRANSFER;
2247 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2248 cmd |= DWC3_DEPCMD_CMDIOC;
2249 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
2250 memset(&params, 0, sizeof(params));
2251 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2252 WARN_ON_ONCE(ret);
2253 dep->resource_index = 0;
2254 dep->flags &= ~DWC3_EP_BUSY;
2255 udelay(100);
2256 }
2257
2258 static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2259 {
2260 u32 epnum;
2261
2262 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2263 struct dwc3_ep *dep;
2264
2265 dep = dwc->eps[epnum];
2266 if (!dep)
2267 continue;
2268
2269 if (!(dep->flags & DWC3_EP_ENABLED))
2270 continue;
2271
2272 dwc3_remove_requests(dwc, dep);
2273 }
2274 }
2275
2276 static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2277 {
2278 u32 epnum;
2279
2280 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2281 struct dwc3_ep *dep;
2282 struct dwc3_gadget_ep_cmd_params params;
2283 int ret;
2284
2285 dep = dwc->eps[epnum];
2286 if (!dep)
2287 continue;
2288
2289 if (!(dep->flags & DWC3_EP_STALL))
2290 continue;
2291
2292 dep->flags &= ~DWC3_EP_STALL;
2293
2294 memset(&params, 0, sizeof(params));
2295 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2296 DWC3_DEPCMD_CLEARSTALL, &params);
2297 WARN_ON_ONCE(ret);
2298 }
2299 }
2300
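/*
 * dwc3_gadget_disconnect_interrupt - handle the Disconnect event
 *
 * Disables U1/U2 initiation, notifies the gadget driver of the
 * disconnect and marks the gadget as not attached.
 */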
2301 static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2302 {
2303 int reg;
2304
2305 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2306 reg &= ~DWC3_DCTL_INITU1ENA;
2307 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2308
2309 reg &= ~DWC3_DCTL_INITU2ENA;
2310 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2311
2312 dwc3_disconnect_gadget(dwc);
2313
2314 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2315 dwc->setup_packet_pending = false;
2316 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
2317 }
2318
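/*
 * dwc3_gadget_reset_interrupt - handle a USB Reset event
 *
 * Notifies the gadget driver of the reset, clears any active test
 * mode, stops all active transfers, clears endpoint stalls and
 * resets the device address to zero.
 */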
2319 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2320 {
2321 u32 reg;
2322
2323 /*
2324 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2325 * would cause a missing Disconnect Event if there's a
2326 * pending Setup Packet in the FIFO.
2327 *
2328 * There's no suggested workaround on the official Bug
2329 * report, which states that "unless the driver/application
2330 * is doing any special handling of a disconnect event,
2331 * there is no functional issue".
2332 *
2333 * Unfortunately, it turns out that we _do_ some special
2334 * handling of a disconnect event, namely complete all
2335 * pending transfers, notify gadget driver of the
2336 * disconnection, and so on.
2337 *
2338 * Our suggested workaround is to follow the Disconnect
2339 * Event steps here, instead, based on a setup_packet_pending
2340 * flag. This flag gets set whenever we have an XferNotReady
2341 * event on EP0 and gets cleared on XferComplete for the
2342 * same endpoint.
2343 *
2344 * Refers to:
2345 *
2346 * STAR#9000466709: RTL: Device : Disconnect event not
2347 * generated if setup packet pending in FIFO
2348 */
2349 if (dwc->revision < DWC3_REVISION_188A) {
2350 if (dwc->setup_packet_pending)
2351 dwc3_gadget_disconnect_interrupt(dwc);
2352 }
2353
2354 dwc3_reset_gadget(dwc);
2355
2356 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2357 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2358 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2359 dwc->test_mode = false;
2360
2361 dwc3_stop_active_transfers(dwc);
2362 dwc3_clear_stall_all_ep(dwc);
2363
2364 /* Reset device address to zero */
2365 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2366 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2367 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2368 }
2369
2370 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2371 {
2372 u32 reg;
2373 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2374
2375 /*
2376 * We change the clock only at SuperSpeed, though it is not clear why
2377 * we would want to. It may become part of the power saving plan.
2378 */
2379
2380 if (speed != DWC3_DSTS_SUPERSPEED)
2381 return;
2382
2383 /*
2384 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2385 * each time on Connect Done.
2386 */
2387 if (!usb30_clock)
2388 return;
2389
2390 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2391 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2392 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2393 }
2394
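/*
 * dwc3_gadget_conndone_interrupt - handle the Connect Done event
 *
 * Reads the negotiated connection speed from DSTS, programs the EP0
 * descriptor and gadget speed accordingly, enables the USB2 LPM
 * capability where supported and re-enables both directions of the
 * control endpoint.
 */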
2395 static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2396 {
2397 struct dwc3_ep *dep;
2398 int ret;
2399 u32 reg;
2400 u8 speed;
2401
2402 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2403 speed = reg & DWC3_DSTS_CONNECTSPD;
2404 dwc->speed = speed;
2405
2406 dwc3_update_ram_clk_sel(dwc, speed);
2407
2408 switch (speed) {
2409 case DWC3_DCFG_SUPERSPEED:
2410 /*
2411 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2412 * would cause a missing USB3 Reset event.
2413 *
2414 * In such situations, we should force a USB3 Reset
2415 * event by calling our dwc3_gadget_reset_interrupt()
2416 * routine.
2417 *
2418 * Refers to:
2419 *
2420 * STAR#9000483510: RTL: SS : USB3 reset event may
2421 * not be generated always when the link enters poll
2422 */
2423 if (dwc->revision < DWC3_REVISION_190A)
2424 dwc3_gadget_reset_interrupt(dwc);
2425
2426 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2427 dwc->gadget.ep0->maxpacket = 512;
2428 dwc->gadget.speed = USB_SPEED_SUPER;
2429 break;
2430 case DWC3_DCFG_HIGHSPEED:
2431 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2432 dwc->gadget.ep0->maxpacket = 64;
2433 dwc->gadget.speed = USB_SPEED_HIGH;
2434 break;
2435 case DWC3_DCFG_FULLSPEED2:
2436 case DWC3_DCFG_FULLSPEED1:
2437 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2438 dwc->gadget.ep0->maxpacket = 64;
2439 dwc->gadget.speed = USB_SPEED_FULL;
2440 break;
2441 case DWC3_DCFG_LOWSPEED:
2442 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2443 dwc->gadget.ep0->maxpacket = 8;
2444 dwc->gadget.speed = USB_SPEED_LOW;
2445 break;
2446 }
2447
2448 dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
2449
2450 /* Enable USB2 LPM Capability */
2451
2452 if ((dwc->revision > DWC3_REVISION_194A)
2453 && (speed != DWC3_DCFG_SUPERSPEED)) {
2454 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2455 reg |= DWC3_DCFG_LPM_CAP;
2456 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2457
2458 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2459 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2460
2461 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2462
2463 /*
2464 * On dwc3 revisions >= 2.40a, when the LPM Erratum is enabled
2465 * and DCFG.LPMCap is set, the core responds with an ACK if the
2466 * BESL value in the LPM token is less than or equal to the LPM
2467 * NYET threshold.
2468 */
2469 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2470 && dwc->has_lpm_erratum,
2471 "LPM Erratum not available on dwc3 revisions < 2.40a\n");
2472
2473 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2474 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2475
2476 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2477 } else {
2478 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2479 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2480 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2481 }
2482
2483 dep = dwc->eps[0];
2484 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2485 false);
2486 if (ret) {
2487 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2488 return;
2489 }
2490
2491 dep = dwc->eps[1];
2492 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2493 false);
2494 if (ret) {
2495 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2496 return;
2497 }
2498
2499 /*
2500 * Configure PHY via GUSB3PIPECTLn if required.
2501 *
2502 * Update GTXFIFOSIZn
2503 *
2504 * In both cases reset values should be sufficient.
2505 */
2506 }
2507
2508 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2509 {
2510 /*
2511 * TODO take core out of low power mode when that's
2512 * implemented.
2513 */
2514
2515 dwc->gadget_driver->resume(&dwc->gadget);
2516 }
2517
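/*
 * dwc3_gadget_linksts_change_interrupt - handle Link State Change events
 *
 * Filters spurious U3 -> Resume events on older cores, applies the
 * first half of the U1/U2 -> U0 throughput workaround and notifies
 * the gadget driver of suspend/resume transitions before recording
 * the new link state.
 */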
2518 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2519 unsigned int evtinfo)
2520 {
2521 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
2522 unsigned int pwropt;
2523
2524 /*
2525 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2526 * Hibernation mode enabled which would show up when device detects
2527 * host-initiated U3 exit.
2528 *
2529 * In that case, device will generate a Link State Change Interrupt
2530 * from U3 to RESUME which is only necessary if Hibernation is
2531 * configured in.
2532 *
2533 * There are no functional changes due to such spurious event and we
2534 * just need to ignore it.
2535 *
2536 * Refers to:
2537 *
2538 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2539 * operational mode
2540 */
2541 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2542 if ((dwc->revision < DWC3_REVISION_250A) &&
2543 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2544 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2545 (next == DWC3_LINK_STATE_RESUME)) {
2546 dwc3_trace(trace_dwc3_gadget,
2547 "ignoring transition U3 -> Resume");
2548 return;
2549 }
2550 }
2551
2552 /*
2553 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
2554 * on the link partner, the USB session might do multiple entries into
2555 * and exits from low power states before a transfer takes place.
2556 *
2557 * Due to this problem, we might experience lower throughput. The
2558 * suggested workaround is to disable DCTL[12:9] bits if we're
2559 * transitioning from U1/U2 to U0 and enable those bits again
2560 * after a transfer completes and there are no pending transfers
2561 * on any of the enabled endpoints.
2562 *
2563 * This is the first half of that workaround.
2564 *
2565 * Refers to:
2566 *
2567 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2568 * core send LGO_Ux entering U0
2569 */
2570 if (dwc->revision < DWC3_REVISION_183A) {
2571 if (next == DWC3_LINK_STATE_U0) {
2572 u32 u1u2;
2573 u32 reg;
2574
2575 switch (dwc->link_state) {
2576 case DWC3_LINK_STATE_U1:
2577 case DWC3_LINK_STATE_U2:
2578 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2579 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2580 | DWC3_DCTL_ACCEPTU2ENA
2581 | DWC3_DCTL_INITU1ENA
2582 | DWC3_DCTL_ACCEPTU1ENA);
2583
2584 if (!dwc->u1u2)
2585 dwc->u1u2 = reg & u1u2;
2586
2587 reg &= ~u1u2;
2588
2589 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2590 break;
2591 default:
2592 /* do nothing */
2593 break;
2594 }
2595 }
2596 }
2597
2598 switch (next) {
2599 case DWC3_LINK_STATE_U1:
2600 if (dwc->speed == USB_SPEED_SUPER)
2601 dwc3_suspend_gadget(dwc);
2602 break;
2603 case DWC3_LINK_STATE_U2:
2604 case DWC3_LINK_STATE_U3:
2605 dwc3_suspend_gadget(dwc);
2606 break;
2607 case DWC3_LINK_STATE_RESUME:
2608 dwc3_resume_gadget(dwc);
2609 break;
2610 default:
2611 /* do nothing */
2612 break;
2613 }
2614
2615 dwc->link_state = next;
2616 }
2617
2618 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2619 unsigned int evtinfo)
2620 {
2621 unsigned int is_ss = evtinfo & BIT(4);
2622
2623 /**
2624 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2625 * has a known issue which can cause USB CV TD.9.23 to fail
2626 * randomly.
2627 *
2628 * Because of this issue, core could generate bogus hibernation
2629 * events which SW needs to ignore.
2630 *
2631 * Refers to:
2632 *
2633 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2634 * Device Fallback from SuperSpeed
2635 */
2636 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2637 return;
2638
2639 /* enter hibernation here */
2640 }
2641
2642 static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2643 const struct dwc3_event_devt *event)
2644 {
2645 switch (event->type) {
2646 case DWC3_DEVICE_EVENT_DISCONNECT:
2647 dwc3_gadget_disconnect_interrupt(dwc);
2648 break;
2649 case DWC3_DEVICE_EVENT_RESET:
2650 dwc3_gadget_reset_interrupt(dwc);
2651 break;
2652 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2653 dwc3_gadget_conndone_interrupt(dwc);
2654 break;
2655 case DWC3_DEVICE_EVENT_WAKEUP:
2656 dwc3_gadget_wakeup_interrupt(dwc);
2657 break;
2658 case DWC3_DEVICE_EVENT_HIBER_REQ:
2659 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2660 "unexpected hibernation event\n"))
2661 break;
2662
2663 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2664 break;
2665 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2666 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2667 break;
2668 case DWC3_DEVICE_EVENT_EOPF:
2669 dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
2670 break;
2671 case DWC3_DEVICE_EVENT_SOF:
2672 dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
2673 break;
2674 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2675 dwc3_trace(trace_dwc3_gadget, "Erratic Error");
2676 break;
2677 case DWC3_DEVICE_EVENT_CMD_CMPL:
2678 dwc3_trace(trace_dwc3_gadget, "Command Complete");
2679 break;
2680 case DWC3_DEVICE_EVENT_OVERFLOW:
2681 dwc3_trace(trace_dwc3_gadget, "Overflow");
2682 break;
2683 default:
2684 dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2685 }
2686 }
2687
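/*
 * dwc3_process_event_entry - decode one event buffer entry
 *
 * Endpoint-specific events are forwarded to dwc3_endpoint_interrupt(),
 * device events to dwc3_gadget_interrupt(). Any other event type is
 * unexpected here and only logged.
 */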
2688 static void dwc3_process_event_entry(struct dwc3 *dwc,
2689 const union dwc3_event *event)
2690 {
2691 trace_dwc3_event(event->raw);
2692
2693 /* Endpoint IRQ, handle it and return early */
2694 if (event->type.is_devspec == 0) {
2695 /* depevt */
2696 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2697 }
2698
2699 switch (event->type.type) {
2700 case DWC3_EVENT_TYPE_DEV:
2701 dwc3_gadget_interrupt(dwc, &event->devt);
2702 break;
2703 /* REVISIT what to do with Carkit and I2C events ? */
2704 default:
2705 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2706 }
2707 }
2708
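/*
 * dwc3_process_event_buf - drain one event buffer (bottom half)
 *
 * Consumes the events latched by dwc3_check_event_buf() in 4-byte
 * steps, acknowledging each one by writing to GEVNTCOUNT, then clears
 * the PENDING flag and unmasks the event interrupt again.
 */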
2709 static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
2710 {
2711 struct dwc3_event_buffer *evt;
2712 irqreturn_t ret = IRQ_NONE;
2713 int left;
2714 u32 reg;
2715
2716 evt = dwc->ev_buffs[buf];
2717 left = evt->count;
2718
2719 if (!(evt->flags & DWC3_EVENT_PENDING))
2720 return IRQ_NONE;
2721
2722 while (left > 0) {
2723 union dwc3_event event;
2724
2725 event.raw = *(u32 *) (evt->buf + evt->lpos);
2726
2727 dwc3_process_event_entry(dwc, &event);
2728
2729 /*
2730 * FIXME we wrap around correctly to the next entry as
2731 * almost all entries are 4 bytes in size. There is one
2732 * entry which has 12 bytes: a regular entry
2733 * followed by 8 bytes of data. ATM it isn't clear how
2734 * things are organized if such an entry crosses the
2735 * buffer boundary, so we'll worry about that once we
2736 * try to handle it.
2737 */
2738 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2739 left -= 4;
2740
2741 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2742 }
2743
2744 evt->count = 0;
2745 evt->flags &= ~DWC3_EVENT_PENDING;
2746 ret = IRQ_HANDLED;
2747
2748 /* Unmask interrupt */
2749 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2750 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2751 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2752
2753 return ret;
2754 }
2755
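/*
 * dwc3_thread_interrupt - threaded (bottom-half) interrupt handler
 *
 * Runs with the controller lock held and processes every event buffer
 * that the hard IRQ handler marked as pending.
 */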
2756 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2757 {
2758 struct dwc3 *dwc = _dwc;
2759 unsigned long flags;
2760 irqreturn_t ret = IRQ_NONE;
2761 int i;
2762
2763 spin_lock_irqsave(&dwc->lock, flags);
2764
2765 for (i = 0; i < dwc->num_event_buffers; i++)
2766 ret |= dwc3_process_event_buf(dwc, i);
2767
2768 spin_unlock_irqrestore(&dwc->lock, flags);
2769
2770 return ret;
2771 }
2772
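/*
 * dwc3_check_event_buf - hard IRQ check for pending events
 *
 * Reads GEVNTCOUNT for one event buffer; if events are pending, the
 * count is saved, the event interrupt is masked and IRQ_WAKE_THREAD
 * is returned so the threaded handler does the actual processing.
 */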
2773 static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
2774 {
2775 struct dwc3_event_buffer *evt;
2776 u32 count;
2777 u32 reg;
2778
2779 evt = dwc->ev_buffs[buf];
2780
2781 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2782 count &= DWC3_GEVNTCOUNT_MASK;
2783 if (!count)
2784 return IRQ_NONE;
2785
2786 evt->count = count;
2787 evt->flags |= DWC3_EVENT_PENDING;
2788
2789 /* Mask interrupt */
2790 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2791 reg |= DWC3_GEVNTSIZ_INTMASK;
2792 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2793
2794 return IRQ_WAKE_THREAD;
2795 }
2796
2797 static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2798 {
2799 struct dwc3 *dwc = _dwc;
2800 int i;
2801 irqreturn_t ret = IRQ_NONE;
2802
2803 for (i = 0; i < dwc->num_event_buffers; i++) {
2804 irqreturn_t status;
2805
2806 status = dwc3_check_event_buf(dwc, i);
2807 if (status == IRQ_WAKE_THREAD)
2808 ret = status;
2809 }
2810
2811 return ret;
2812 }
2813
2814 /**
2815 * dwc3_gadget_init - Initializes gadget related registers
2816 * @dwc: pointer to our controller context structure
2817 *
2818 * Returns 0 on success otherwise negative errno.
2819 */
2820 int dwc3_gadget_init(struct dwc3 *dwc)
2821 {
2822 int ret;
2823
2824 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2825 &dwc->ctrl_req_addr, GFP_KERNEL);
2826 if (!dwc->ctrl_req) {
2827 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2828 ret = -ENOMEM;
2829 goto err0;
2830 }
2831
2832 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2833 &dwc->ep0_trb_addr, GFP_KERNEL);
2834 if (!dwc->ep0_trb) {
2835 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2836 ret = -ENOMEM;
2837 goto err1;
2838 }
2839
2840 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
2841 if (!dwc->setup_buf) {
2842 ret = -ENOMEM;
2843 goto err2;
2844 }
2845
2846 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
2847 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2848 GFP_KERNEL);
2849 if (!dwc->ep0_bounce) {
2850 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2851 ret = -ENOMEM;
2852 goto err3;
2853 }
2854
2855 dwc->zlp_buf = kzalloc(DWC3_ZLP_BUF_SIZE, GFP_KERNEL);
2856 if (!dwc->zlp_buf) {
2857 ret = -ENOMEM;
2858 goto err4;
2859 }
2860
2861 dwc->gadget.ops = &dwc3_gadget_ops;
2862 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2863 dwc->gadget.sg_supported = true;
2864 dwc->gadget.name = "dwc3-gadget";
2865
2866 /*
2867 * FIXME We might be setting max_speed to <SUPER, however versions
2868 * <2.20a of dwc3 have an issue with metastability (documented
2869 * elsewhere in this driver) which tells us we can't set max speed to
2870 * anything lower than SUPER.
2871 *
2872 * Because gadget.max_speed is only used by composite.c and function
2873 * drivers (i.e. it won't go into dwc3's registers) we are allowing this
2874 * to happen so that we avoid sending a SuperSpeed Capability descriptor
2875 * together with our BOS descriptor, as that could confuse the host into
2876 * thinking we can handle SuperSpeed.
2877 *
2878 * Note that, in fact, we won't even support GetBOS requests when speed
2879 * is less than SuperSpeed, because we don't have the means, yet, to tell
2880 * composite.c that we are USB 2.0 + LPM ECN.
2881 */
2882 if (dwc->revision < DWC3_REVISION_220A)
2883 dwc3_trace(trace_dwc3_gadget,
2884 "Changing max_speed on rev %08x\n",
2885 dwc->revision);
2886
2887 dwc->gadget.max_speed = dwc->maximum_speed;
2888
2889 /*
2890 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2891 * on ep out.
2892 */
2893 dwc->gadget.quirk_ep_out_aligned_size = true;
2894
2895 /*
2896 * REVISIT: Here we should clear all pending IRQs to be
2897 * sure we're starting from a well known location.
2898 */
2899
2900 ret = dwc3_gadget_init_endpoints(dwc);
2901 if (ret)
2902 goto err5;
2903
2904 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2905 if (ret) {
2906 dev_err(dwc->dev, "failed to register udc\n");
2907 goto err5;
2908 }
2909
2910 return 0;
2911
2912 err5:
2913 kfree(dwc->zlp_buf);
2914
2915 err4:
2916 dwc3_gadget_free_endpoints(dwc);
2917 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2918 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2919
2920 err3:
2921 kfree(dwc->setup_buf);
2922
2923 err2:
2924 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2925 dwc->ep0_trb, dwc->ep0_trb_addr);
2926
2927 err1:
2928 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2929 dwc->ctrl_req, dwc->ctrl_req_addr);
2930
2931 err0:
2932 return ret;
2933 }
2934
2935 /* -------------------------------------------------------------------------- */
2936
2937 void dwc3_gadget_exit(struct dwc3 *dwc)
2938 {
2939 usb_del_gadget_udc(&dwc->gadget);
2940
2941 dwc3_gadget_free_endpoints(dwc);
2942
2943 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2944 dwc->ep0_bounce, dwc->ep0_bounce_addr);
2945
2946 kfree(dwc->setup_buf);
2947 kfree(dwc->zlp_buf);
2948
2949 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
2950 dwc->ep0_trb, dwc->ep0_trb_addr);
2951
2952 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2953 dwc->ctrl_req, dwc->ctrl_req_addr);
2954 }
2955
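/*
 * dwc3_gadget_suspend - quiesce the gadget side for suspend
 *
 * When pullups are connected, device interrupts are disabled and
 * dwc3_gadget_run_stop() is invoked. Both directions of the control
 * endpoint are disabled and DCFG is saved so that
 * dwc3_gadget_resume() can restore it.
 */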
2956 int dwc3_gadget_suspend(struct dwc3 *dwc)
2957 {
2958 if (!dwc->gadget_driver)
2959 return 0;
2960
2961 if (dwc->pullups_connected) {
2962 dwc3_gadget_disable_irq(dwc);
2963 dwc3_gadget_run_stop(dwc, true, true);
2964 }
2965
2966 __dwc3_gadget_ep_disable(dwc->eps[0]);
2967 __dwc3_gadget_ep_disable(dwc->eps[1]);
2968
2969 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2970
2971 return 0;
2972 }
2973
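/*
 * dwc3_gadget_resume - restore gadget state after suspend
 *
 * Re-enables both directions of the control endpoint with the default
 * SuperSpeed EP0 descriptor, restarts SETUP reception, restores the
 * saved DCFG and, if pullups were connected, re-enables interrupts
 * and Run/Stop.
 */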
2974 int dwc3_gadget_resume(struct dwc3 *dwc)
2975 {
2976 struct dwc3_ep *dep;
2977 int ret;
2978
2979 if (!dwc->gadget_driver)
2980 return 0;
2981
2982 /* Start with SuperSpeed Default */
2983 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2984
2985 dep = dwc->eps[0];
2986 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2987 false);
2988 if (ret)
2989 goto err0;
2990
2991 dep = dwc->eps[1];
2992 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2993 false);
2994 if (ret)
2995 goto err1;
2996
2997 /* begin to receive SETUP packets */
2998 dwc->ep0state = EP0_SETUP_PHASE;
2999 dwc3_ep0_out_start(dwc);
3000
3001 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
3002
3003 if (dwc->pullups_connected) {
3004 dwc3_gadget_enable_irq(dwc);
3005 dwc3_gadget_run_stop(dwc, true, false);
3006 }
3007
3008 return 0;
3009
3010 err1:
3011 __dwc3_gadget_ep_disable(dwc->eps[0]);
3012
3013 err0:
3014 return ret;
3015 }
3016